Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 3
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 6
-rw-r--r--  drivers/gpu/drm/i915/dvo.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 777
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 265
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 474
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 752
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence_reg.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 209
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_internal.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_object.h | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 163
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 225
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 362
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_query.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 241
-rw-r--r--  drivers/gpu/drm/i915/i915_request.c | 220
-rw-r--r--  drivers/gpu/drm/i915/i915_request.h | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_reset.c | 1389
-rw-r--r--  drivers/gpu/drm/i915/i915_reset.h | 56
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 31
-rw-r--r--  drivers/gpu/drm/i915/i915_timeline.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 30
-rw-r--r--  drivers/gpu/drm/i915/icl_dsi.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 74
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 60
-rw-r--r--  drivers/gpu/drm/i915/intel_color.c | 258
-rw-r--r--  drivers/gpu/drm/i915/intel_connector.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 93
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 416
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.c | 107
-rw-r--r--  drivers/gpu/drm/i915/intel_device_info.h | 31
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 386
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 544
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_link_training.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 205
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.h | 53
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 199
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi_vbt.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 179
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_fifo_underrun.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_frontbuffer.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_guc.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fw.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_submission.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_hangcheck.c | 183
-rw-r--r--  drivers/gpu/drm/i915/intel_hdcp.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_hotplug.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_huc_fw.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 314
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_lspcon.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 1123
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 81
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 508
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 96
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 594
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 91
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_uc_fw.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 508
-rw-r--r--  drivers/gpu/drm/i915/intel_vdsc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_wopcm.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c | 131
-rw-r--r--  drivers/gpu/drm/i915/selftests/huge_pages.c | 42
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem.c | 47
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_coherency.c | 5
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_context.c | 140
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 98
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 118
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_object.c | 22
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_request.c | 113
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_vma.c | 77
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.c | 85
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_live_test.h | 35
-rw-r--r--  drivers/gpu/drm/i915/selftests/igt_spinner.c | 86
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_guc.c | 10
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 327
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_lrc.c | 31
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_workarounds.c | 120
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_context.c | 7
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 110
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 15
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gtt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi.c | 36
-rw-r--r--  drivers/gpu/drm/i915/vlv_dsi_pll.c | 31
138 files changed, 7070 insertions, 6832 deletions
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index f51a116543f7..26835d174939 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1276,6 +1276,8 @@ static const struct dpcd_quirk dpcd_quirk_list[] = {
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID_ANY, true, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
/* LG LP140WF6-SPM1 eDP panel */
{ OUI(0x00, 0x22, 0xb9), DEVICE_ID('s', 'i', 'v', 'a', 'r', 'T'), false, BIT(DP_DPCD_QUIRK_CONSTANT_N) },
+ /* Apple panels need some additional handling to support PSR */
+ { OUI(0x00, 0x10, 0xfa), DEVICE_ID_ANY, false, BIT(DP_DPCD_QUIRK_NO_PSR) }
};
#undef OUI
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 9e36ffb5eb7c..ad4d71161dda 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -21,11 +21,11 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
+ select STACKDEPOT
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
- select STACKDEPOT if DRM=y # for DRM_DEBUG_MM
select DRM_DEBUG_SELFTEST
select SW_SYNC # signaling validation framework (igt/syncobj*)
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
@@ -173,6 +173,7 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
+ select STACKDEPOT
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 19b5fe5016bf..f050759686ca 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -40,9 +40,10 @@ i915-y := i915_drv.o \
i915_mm.o \
i915_params.o \
i915_pci.o \
- i915_suspend.o \
- i915_syncmap.o \
+ i915_reset.o \
+ i915_suspend.o \
i915_sw_fence.o \
+ i915_syncmap.o \
i915_sysfs.o \
intel_csr.o \
intel_device_info.o \
@@ -166,6 +167,7 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_flush_test.o \
+ selftests/igt_live_test.o \
selftests/igt_reset.o \
selftests/igt_spinner.o
diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
index 5e6a3013da49..16e0345b711f 100644
--- a/drivers/gpu/drm/i915/dvo.h
+++ b/drivers/gpu/drm/i915/dvo.h
@@ -24,7 +24,6 @@
#define _INTEL_DVO_H
#include <linux/i2c.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 359d37d5c958..1fa2f65c3cd1 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -180,7 +180,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
@@ -206,7 +206,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return 0;
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
@@ -219,7 +219,7 @@ out_free_fence:
vgpu->fence.regs[i] = NULL;
}
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
return -ENOSPC;
}
@@ -317,7 +317,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b4ab1dad0143..435c746c3f73 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -597,7 +597,7 @@ static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index c32e7d5e8629..f04b3b965bfc 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
mutex_unlock(&vgpu->gvt->sched_lock);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1ad8c5e1455d..3816dcae2185 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -997,7 +997,7 @@ complete:
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
- intel_runtime_pm_put(gvt->dev_priv);
+ intel_runtime_pm_put_unchecked(gvt->dev_priv);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1451,7 +1451,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
mutex_lock(&dev_priv->drm.struct_mutex);
ret = intel_gvt_scan_and_shadow_workload(workload);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put_unchecked(dev_priv);
}
if (ret && (vgpu_is_vm_unhealthy(ret))) {
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index c628be05fbfe..e1c860f80eb0 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -148,10 +148,10 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN8(gvt->dev_priv))
+ if (IS_GEN(gvt->dev_priv, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
vgpu_types[i].name);
- else if (IS_GEN9(gvt->dev_priv))
+ else if (IS_GEN(gvt->dev_priv, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 95478db9998b..33e8eed64423 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -865,7 +865,7 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
int cmd_table_count;
int ret;
- if (!IS_GEN7(engine->i915))
+ if (!IS_GEN(engine->i915, 7))
return;
switch (engine->id) {
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9bad6a32adae..24d6d4ce14ef 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,8 @@
#include "intel_drv.h"
#include "intel_guc_submission.h"
+#include "i915_reset.h"
+
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
return to_i915(node->minor->dev);
@@ -48,7 +50,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
intel_device_info_dump_flags(info, &p);
- intel_device_info_dump_runtime(info, &p);
+ intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
intel_driver_caps_print(&dev_priv->caps, &p);
kernel_param_lock(THIS_MODULE);
@@ -297,11 +299,12 @@ out:
}
struct file_stats {
- struct drm_i915_file_private *file_priv;
+ struct i915_address_space *vm;
unsigned long count;
u64 total, unbound;
u64 global, shared;
u64 active, inactive;
+ u64 closed;
};
static int per_file_stats(int id, void *ptr, void *data)
@@ -326,9 +329,7 @@ static int per_file_stats(int id, void *ptr, void *data)
if (i915_vma_is_ggtt(vma)) {
stats->global += vma->node.size;
} else {
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
-
- if (ppgtt->vm.file != stats->file_priv)
+ if (vma->vm != stats->vm)
continue;
}
@@ -336,6 +337,9 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->active += vma->node.size;
else
stats->inactive += vma->node.size;
+
+ if (i915_vma_is_closed(vma))
+ stats->closed += vma->node.size;
}
return 0;
@@ -343,7 +347,7 @@ static int per_file_stats(int id, void *ptr, void *data)
#define print_file_stats(m, name, stats) do { \
if (stats.count) \
- seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound)\n", \
+ seq_printf(m, "%s: %lu objects, %llu bytes (%llu active, %llu inactive, %llu global, %llu shared, %llu unbound, %llu closed)\n", \
name, \
stats.count, \
stats.total, \
@@ -351,20 +355,19 @@ static int per_file_stats(int id, void *ptr, void *data)
stats.inactive, \
stats.global, \
stats.shared, \
- stats.unbound); \
+ stats.unbound, \
+ stats.closed); \
} while (0)
static void print_batch_pool_stats(struct seq_file *m,
struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj;
- struct file_stats stats;
struct intel_engine_cs *engine;
+ struct file_stats stats = {};
enum intel_engine_id id;
int j;
- memset(&stats, 0, sizeof(stats));
-
for_each_engine(engine, dev_priv, id) {
for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
list_for_each_entry(obj,
@@ -377,44 +380,47 @@ static void print_batch_pool_stats(struct seq_file *m,
print_file_stats(m, "[k]batch pool", stats);
}
-static int per_file_ctx_stats(int idx, void *ptr, void *data)
+static void print_context_stats(struct seq_file *m,
+ struct drm_i915_private *i915)
{
- struct i915_gem_context *ctx = ptr;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ struct file_stats kstats = {};
+ struct i915_gem_context *ctx;
- for_each_engine(engine, ctx->i915, id) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ list_for_each_entry(ctx, &i915->contexts.list, link) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
- if (ce->state)
- per_file_stats(0, ce->state->obj, data);
- if (ce->ring)
- per_file_stats(0, ce->ring->vma->obj, data);
- }
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce = to_intel_context(ctx, engine);
- return 0;
-}
+ if (ce->state)
+ per_file_stats(0, ce->state->obj, &kstats);
+ if (ce->ring)
+ per_file_stats(0, ce->ring->vma->obj, &kstats);
+ }
-static void print_context_stats(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct drm_device *dev = &dev_priv->drm;
- struct file_stats stats;
- struct drm_file *file;
+ if (!IS_ERR_OR_NULL(ctx->file_priv)) {
+ struct file_stats stats = { .vm = &ctx->ppgtt->vm, };
+ struct drm_file *file = ctx->file_priv->file;
+ struct task_struct *task;
+ char name[80];
- memset(&stats, 0, sizeof(stats));
+ spin_lock(&file->table_lock);
+ idr_for_each(&file->object_idr, per_file_stats, &stats);
+ spin_unlock(&file->table_lock);
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->kernel_context)
- per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
+ rcu_read_lock();
+ task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
+ snprintf(name, sizeof(name), "%s/%d",
+ task ? task->comm : "<unknown>",
+ ctx->user_handle);
+ rcu_read_unlock();
- list_for_each_entry(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *fpriv = file->driver_priv;
- idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
+ print_file_stats(m, name, stats);
+ }
}
- mutex_unlock(&dev->struct_mutex);
- print_file_stats(m, "[k]contexts", stats);
+ print_file_stats(m, "[k]contexts", kstats);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
@@ -426,14 +432,9 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
u64 size, mapped_size, purgeable_size, dpy_size, huge_size;
struct drm_i915_gem_object *obj;
unsigned int page_sizes = 0;
- struct drm_file *file;
char buf[80];
int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
seq_printf(m, "%u objects, %llu bytes\n",
dev_priv->mm.object_count,
dev_priv->mm.object_memory);
@@ -514,43 +515,14 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
buf, sizeof(buf)));
seq_putc(m, '\n');
- print_batch_pool_stats(m, dev_priv);
- mutex_unlock(&dev->struct_mutex);
- mutex_lock(&dev->filelist_mutex);
- print_context_stats(m, dev_priv);
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct file_stats stats;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_request *request;
- struct task_struct *task;
-
- mutex_lock(&dev->struct_mutex);
-
- memset(&stats, 0, sizeof(stats));
- stats.file_priv = file->driver_priv;
- spin_lock(&file->table_lock);
- idr_for_each(&file->object_idr, per_file_stats, &stats);
- spin_unlock(&file->table_lock);
- /*
- * Although we have a valid reference on file->pid, that does
- * not guarantee that the task_struct who called get_pid() is
- * still alive (e.g. get_pid(current) => fork() => exit()).
- * Therefore, we need to protect this ->comm access using RCU.
- */
- request = list_first_entry_or_null(&file_priv->mm.request_list,
- struct i915_request,
- client_link);
- rcu_read_lock();
- task = pid_task(request && request->gem_context->pid ?
- request->gem_context->pid : file->pid,
- PIDTYPE_PID);
- print_file_stats(m, task ? task->comm : "<unknown>", stats);
- rcu_read_unlock();
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
- mutex_unlock(&dev->struct_mutex);
- }
- mutex_unlock(&dev->filelist_mutex);
+ print_batch_pool_stats(m, dev_priv);
+ print_context_stats(m, dev_priv);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -656,10 +628,12 @@ static void gen8_display_interrupt_info(struct seq_file *m)
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+ if (!wakeref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -674,7 +648,7 @@ static void gen8_display_interrupt_info(struct seq_file *m)
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -704,11 +678,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int i, pipe;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
if (IS_CHERRYVIEW(dev_priv)) {
+ intel_wakeref_t pref;
+
seq_printf(m, "Master Interrupt Control:\t%08x\n",
I915_READ(GEN8_MASTER_IRQ));
@@ -724,8 +701,9 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
enum intel_display_power_domain power_domain;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
+ pref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+ if (!pref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -735,17 +713,17 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, pref);
}
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
seq_printf(m, "Port hotplug:\t%08x\n",
I915_READ(PORT_HOTPLUG_EN));
seq_printf(m, "DPFLIPSTAT:\t%08x\n",
I915_READ(VLV_DPFLIPSTAT));
seq_printf(m, "DPINVGTT:\t%08x\n",
I915_READ(DPINVGTT));
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref);
for (i = 0; i < 4; i++) {
seq_printf(m, "GT Interrupt IMR %d:\t%08x\n",
@@ -808,10 +786,12 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IMR));
for_each_pipe(dev_priv, pipe) {
enum intel_display_power_domain power_domain;
+ intel_wakeref_t pref;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain)) {
+ pref = intel_display_power_get_if_enabled(dev_priv,
+ power_domain);
+ if (!pref) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -820,7 +800,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, pref);
}
seq_printf(m, "Master IER:\t%08x\n",
@@ -907,7 +887,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -980,12 +960,13 @@ static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_state *gpu;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(i915);
- gpu = i915_capture_gpu_state(i915);
- intel_runtime_pm_put(i915);
- if (!gpu)
- return -ENOMEM;
+ gpu = NULL;
+ with_intel_runtime_pm(i915, wakeref)
+ gpu = i915_capture_gpu_state(i915);
+ if (IS_ERR(gpu))
+ return PTR_ERR(gpu);
file->private_data = gpu;
return 0;
@@ -1018,7 +999,13 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
- file->private_data = i915_first_error_state(inode->i_private);
+ struct i915_gpu_state *error;
+
+ error = i915_first_error_state(inode->i_private);
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ file->private_data = error;
return 0;
}
@@ -1032,39 +1019,16 @@ static const struct file_operations i915_error_state_fops = {
};
#endif
-static int
-i915_next_seqno_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- intel_runtime_pm_get(dev_priv);
- ret = i915_gem_set_global_seqno(dev, val);
- intel_runtime_pm_put(dev_priv);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
- NULL, i915_next_seqno_set,
- "0x%llx\n");
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ intel_wakeref_t wakeref;
int ret = 0;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
u16 rgvswctl = I915_READ16(MEMSWCTL);
u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1274,7 +1238,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@@ -1313,6 +1277,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
u64 acthd[I915_NUM_ENGINES];
u32 seqno[I915_NUM_ENGINES];
struct intel_instdone instdone;
+ intel_wakeref_t wakeref;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
@@ -1331,17 +1296,15 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
return 0;
}
- intel_runtime_pm_get(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ for_each_engine(engine, dev_priv, id) {
+ acthd[id] = intel_engine_get_active_head(engine);
+ seqno[id] = intel_engine_get_seqno(engine);
+ }
- for_each_engine(engine, dev_priv, id) {
- acthd[id] = intel_engine_get_active_head(engine);
- seqno[id] = intel_engine_get_seqno(engine);
+ intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
}
- intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
-
- intel_runtime_pm_put(dev_priv);
-
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
seq_printf(m, "Hangcheck active, timer fires in %dms\n",
jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
@@ -1616,18 +1579,17 @@ static int gen6_drpc_info(struct seq_file *m)
static int i915_drpc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- int err;
-
- intel_runtime_pm_get(dev_priv);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = vlv_drpc_info(m);
- else if (INTEL_GEN(dev_priv) >= 6)
- err = gen6_drpc_info(m);
- else
- err = ironlake_drpc_info(m);
-
- intel_runtime_pm_put(dev_priv);
+ intel_wakeref_t wakeref;
+ int err = -ENODEV;
+
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ err = vlv_drpc_info(m);
+ else if (INTEL_GEN(dev_priv) >= 6)
+ err = gen6_drpc_info(m);
+ else
+ err = ironlake_drpc_info(m);
+ }
return err;
}
@@ -1649,11 +1611,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_fbc *fbc = &dev_priv->fbc;
+ intel_wakeref_t wakeref;
if (!HAS_FBC(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&fbc->lock);
if (intel_fbc_is_active(dev_priv))
@@ -1680,7 +1643,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
}
mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -1725,11 +1688,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
static int i915_ips_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
if (!HAS_IPS(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "Enabled by kernel parameter: %s\n",
yesno(i915_modparams.enable_ips));
@@ -1743,7 +1707,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
seq_puts(m, "Currently: disabled\n");
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -1751,10 +1715,10 @@ static int i915_ips_status(struct seq_file *m, void *unused)
static int i915_sr_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
bool sr_enabled = false;
- intel_runtime_pm_get(dev_priv);
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
if (INTEL_GEN(dev_priv) >= 9)
/* no global SR status; inspect per-plane WM */;
@@ -1770,8 +1734,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- intel_runtime_pm_put(dev_priv);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
@@ -1780,31 +1743,24 @@ static int i915_sr_status(struct seq_file *m, void *unused)
static int i915_emon_status(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- unsigned long temp, chipset, gfx;
- int ret;
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(i915, 5))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ with_intel_runtime_pm(i915, wakeref) {
+ unsigned long temp, chipset, gfx;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
+ temp = i915_mch_val(i915);
+ chipset = i915_chipset_val(i915);
+ gfx = i915_gfx_val(i915);
- temp = i915_mch_val(dev_priv);
- chipset = i915_chipset_val(dev_priv);
- gfx = i915_gfx_val(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "GMCH temp: %ld\n", temp);
- seq_printf(m, "Chipset power: %ld\n", chipset);
- seq_printf(m, "GFX power: %ld\n", gfx);
- seq_printf(m, "Total power: %ld\n", chipset + gfx);
-
- intel_runtime_pm_put(dev_priv);
+ seq_printf(m, "GMCH temp: %ld\n", temp);
+ seq_printf(m, "Chipset power: %ld\n", chipset);
+ seq_printf(m, "GFX power: %ld\n", gfx);
+ seq_printf(m, "Total power: %ld\n", chipset + gfx);
+ }
return 0;
}
@@ -1814,13 +1770,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
unsigned int max_gpu_freq, min_gpu_freq;
+ intel_wakeref_t wakeref;
int gpu_freq, ia_freq;
int ret;
if (!HAS_LLC(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
ret = mutex_lock_interruptible(&dev_priv->pcu_lock);
if (ret)
@@ -1853,7 +1810,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
mutex_unlock(&dev_priv->pcu_lock);
out:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@@ -2026,15 +1983,16 @@ static const char *swizzle_string(unsigned swizzle)
static int i915_swizzle_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "bit6 swizzle for X-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_x));
seq_printf(m, "bit6 swizzle for Y-tiling = %s\n",
swizzle_string(dev_priv->mm.bit_6_swizzle_y));
- if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 3, 4)) {
seq_printf(m, "DDC = 0x%08x\n",
I915_READ(DCC));
seq_printf(m, "DDC2 = 0x%08x\n",
@@ -2065,129 +2023,11 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
seq_puts(m, "L-shaped memory detected\n");
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
-static int per_file_ctx(int id, void *ptr, void *data)
-{
- struct i915_gem_context *ctx = ptr;
- struct seq_file *m = data;
- struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-
- if (!ppgtt) {
- seq_printf(m, " no ppgtt for context %d\n",
- ctx->user_handle);
- return 0;
- }
-
- if (i915_gem_context_is_default(ctx))
- seq_puts(m, " default context:\n");
- else
- seq_printf(m, " context %d:\n", ctx->user_handle);
- ppgtt->debug_dump(ppgtt, m);
-
- return 0;
-}
-
-static void gen8_ppgtt_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int i;
-
- if (!ppgtt)
- return;
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s\n", engine->name);
- for (i = 0; i < 4; i++) {
- u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i));
- pdp <<= 32;
- pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i));
- seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp);
- }
- }
-}
-
-static void gen6_ppgtt_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (IS_GEN6(dev_priv))
- seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
-
- for_each_engine(engine, dev_priv, id) {
- seq_printf(m, "%s\n", engine->name);
- if (IS_GEN7(dev_priv))
- seq_printf(m, "GFX_MODE: 0x%08x\n",
- I915_READ(RING_MODE_GEN7(engine)));
- seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE(engine)));
- seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
- seq_printf(m, "PP_DIR_DCLV: 0x%08x\n",
- I915_READ(RING_PP_DIR_DCLV(engine)));
- }
- if (dev_priv->mm.aliasing_ppgtt) {
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
- seq_puts(m, "aliasing PPGTT:\n");
- seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd.base.ggtt_offset);
-
- ppgtt->debug_dump(ppgtt, m);
- }
-
- seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
-}
-
-static int i915_ppgtt_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct drm_file *file;
- int ret;
-
- mutex_lock(&dev->filelist_mutex);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out_unlock;
-
- intel_runtime_pm_get(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 8)
- gen8_ppgtt_info(m, dev_priv);
- else if (INTEL_GEN(dev_priv) >= 6)
- gen6_ppgtt_info(m, dev_priv);
-
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct task_struct *task;
-
- task = get_pid_task(file->pid, PIDTYPE_PID);
- if (!task) {
- ret = -ESRCH;
- goto out_rpm;
- }
- seq_printf(m, "\nproc: %s\n", task->comm);
- put_task_struct(task);
- idr_for_each(&file_priv->context_idr, per_file_ctx,
- (void *)(unsigned long)m);
- }
-
-out_rpm:
- intel_runtime_pm_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-out_unlock:
- mutex_unlock(&dev->filelist_mutex);
- return ret;
-}
-
static int count_irq_waiters(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -2220,9 +2060,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
+ intel_wakeref_t wakeref;
struct drm_file *file;
- if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
act_freq = vlv_punit_read(dev_priv,
@@ -2233,7 +2074,6 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
act_freq = intel_get_cagf(dev_priv,
I915_READ(GEN6_RPSTAT1));
}
- intel_runtime_pm_put(dev_priv);
}
seq_printf(m, "RPS enabled? %d\n", rps->enabled);
@@ -2316,6 +2156,7 @@ static int i915_llc(struct seq_file *m, void *data)
static int i915_huc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
struct drm_printer p;
if (!HAS_HUC(dev_priv))
@@ -2324,9 +2165,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->huc.fw, &p);
- intel_runtime_pm_get(dev_priv);
- seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
return 0;
}
@@ -2334,8 +2174,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
static int i915_guc_load_status_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
struct drm_printer p;
- u32 tmp, i;
if (!HAS_GUC(dev_priv))
return -ENODEV;
@@ -2343,22 +2183,23 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
p = drm_seq_file_printer(m);
intel_uc_fw_dump(&dev_priv->guc.fw, &p);
- intel_runtime_pm_get(dev_priv);
-
- tmp = I915_READ(GUC_STATUS);
-
- seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
- seq_printf(m, "\tBootrom status = 0x%x\n",
- (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
- seq_printf(m, "\tuKernel status = 0x%x\n",
- (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
- seq_printf(m, "\tMIA Core status = 0x%x\n",
- (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
- seq_puts(m, "\nScratch registers:\n");
- for (i = 0; i < 16; i++)
- seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
-
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ u32 tmp = I915_READ(GUC_STATUS);
+ u32 i;
+
+ seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
+ seq_printf(m, "\tBootrom status = 0x%x\n",
+ (tmp & GS_BOOTROM_MASK) >> GS_BOOTROM_SHIFT);
+ seq_printf(m, "\tuKernel status = 0x%x\n",
+ (tmp & GS_UKERNEL_MASK) >> GS_UKERNEL_SHIFT);
+ seq_printf(m, "\tMIA Core status = 0x%x\n",
+ (tmp & GS_MIA_MASK) >> GS_MIA_SHIFT);
+ seq_puts(m, "\nScratch registers:\n");
+ for (i = 0; i < 16; i++) {
+ seq_printf(m, "\t%2d: \t0x%x\n",
+ i, I915_READ(SOFT_SCRATCH(i)));
+ }
+ }
return 0;
}
@@ -2410,7 +2251,7 @@ static void i915_guc_client_info(struct seq_file *m,
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
- uint64_t tot = 0;
+ u64 tot = 0;
seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
client->priority, client->stage_id, client->proc_desc_offset);
@@ -2716,6 +2557,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
u32 psrperf = 0;
bool enabled = false;
bool sink_support;
@@ -2728,7 +2570,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
if (!sink_support)
return 0;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
seq_printf(m, "PSR mode: %s\n",
@@ -2767,7 +2609,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
dev_priv->psr.last_exit);
}
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -2776,6 +2618,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
struct drm_modeset_acquire_ctx ctx;
+ intel_wakeref_t wakeref;
int ret;
if (!CAN_PSR(dev_priv))
@@ -2783,7 +2626,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
@@ -2798,7 +2641,7 @@ retry:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret;
}
@@ -2823,24 +2666,20 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
unsigned long long power;
+ intel_wakeref_t wakeref;
u32 units;
if (INTEL_GEN(dev_priv) < 6)
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
-
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power)) {
- intel_runtime_pm_put(dev_priv);
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
return -ENODEV;
- }
units = (power & 0x1f00) >> 8;
- power = I915_READ(MCH_SECP_NRG_STTS);
- power = (1000000 * power) >> units; /* convert to uJ */
-
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ power = I915_READ(MCH_SECP_NRG_STTS);
+ power = (1000000 * power) >> units; /* convert to uJ */
seq_printf(m, "%llu", power);
return 0;
@@ -2854,6 +2693,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
if (!HAS_RUNTIME_PM(dev_priv))
seq_puts(m, "Runtime power management not supported\n");
+ seq_printf(m, "Runtime power status: %s\n",
+ enableddisabled(!dev_priv->power_domains.wakeref));
+
seq_printf(m, "GPU idle: %s (epoch %u)\n",
yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "IRQs disabled: %s\n",
@@ -2868,6 +2710,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
pci_power_name(pdev->current_state),
pdev->current_state);
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)) {
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ print_intel_runtime_pm_wakeref(dev_priv, &p);
+ }
+
return 0;
}
@@ -2902,6 +2750,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
static int i915_dmc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
struct intel_csr *csr;
if (!HAS_CSR(dev_priv))
@@ -2909,7 +2758,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
csr = &dev_priv->csr;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
seq_printf(m, "path: %s\n", csr->fw_path);
@@ -2935,7 +2784,7 @@ out:
seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -3120,14 +2969,13 @@ static const char *plane_type(enum drm_plane_type type)
return "unknown";
}
-static const char *plane_rotation(unsigned int rotation)
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
{
- static char buf[48];
/*
* According to doc only one DRM_MODE_ROTATE_ is allowed but this
* will print them all to visualize if the values are misused
*/
- snprintf(buf, sizeof(buf),
+ snprintf(buf, bufsize,
"%s%s%s%s%s%s(0x%08x)",
(rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
(rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
@@ -3136,8 +2984,6 @@ static const char *plane_rotation(unsigned int rotation)
(rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
(rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
rotation);
-
- return buf;
}
static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
@@ -3150,6 +2996,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
struct drm_plane_state *state;
struct drm_plane *plane = &intel_plane->base;
struct drm_format_name_buf format_name;
+ char rot_str[48];
if (!plane->state) {
seq_puts(m, "plane->state is NULL!\n");
@@ -3165,6 +3012,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
sprintf(format_name.str, "N/A");
}
+ plane_rotation(rot_str, sizeof(rot_str), state->rotation);
+
seq_printf(m, "\t--Plane id %d: type=%s, crtc_pos=%4dx%4d, crtc_size=%4dx%4d, src_pos=%d.%04ux%d.%04u, src_size=%d.%04ux%d.%04u, format=%s, rotation=%s\n",
plane->base.id,
plane_type(intel_plane->base.type),
@@ -3179,7 +3028,7 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc)
(state->src_h >> 16),
((state->src_h & 0xffff) * 15625) >> 10,
format_name.str,
- plane_rotation(state->rotation));
+ rot_str);
}
}
@@ -3218,8 +3067,10 @@ static int i915_display_info(struct seq_file *m, void *unused)
struct intel_crtc *crtc;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(dev_priv);
- intel_runtime_pm_get(dev_priv);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
@@ -3267,7 +3118,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -3276,23 +3127,24 @@ static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
enum intel_engine_id id;
struct drm_printer p;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
seq_printf(m, "GT awake? %s (epoch %u)\n",
yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
- dev_priv->info.cs_timestamp_frequency_khz);
+ RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
p = drm_seq_file_printer(m);
for_each_engine(engine, dev_priv, id)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return 0;
}
@@ -3302,7 +3154,7 @@ static int i915_rcs_topology(struct seq_file *m, void *unused)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_printer p = drm_seq_file_printer(m);
- intel_device_info_dump_topology(&INTEL_INFO(dev_priv)->sseu, &p);
+ intel_device_info_dump_topology(&RUNTIME_INFO(dev_priv)->sseu, &p);
return 0;
}
@@ -3405,20 +3257,21 @@ static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- int ret;
+ intel_wakeref_t wakeref;
bool enable;
+ int ret;
ret = kstrtobool_from_user(ubuf, len, &enable);
if (ret < 0)
return ret;
- intel_runtime_pm_get(dev_priv);
- if (!dev_priv->ipc_enabled && enable)
- DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (!dev_priv->ipc_enabled && enable)
+ DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ }
return len;
}
@@ -3786,7 +3639,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
{
struct drm_i915_private *dev_priv = m->private;
struct drm_device *dev = &dev_priv->drm;
@@ -3829,7 +3682,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
static int pri_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- const uint16_t *latencies;
+ const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3844,7 +3697,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
static int spr_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- const uint16_t *latencies;
+ const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3859,7 +3712,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
static int cur_wm_latency_show(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = m->private;
- const uint16_t *latencies;
+ const u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3902,12 +3755,12 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
}
static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, uint16_t wm[8])
+ size_t len, loff_t *offp, u16 wm[8])
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
struct drm_device *dev = &dev_priv->drm;
- uint16_t new[8] = { 0 };
+ u16 new[8] = { 0 };
int num_levels;
int level;
int ret;
@@ -3952,7 +3805,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- uint16_t *latencies;
+ u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3967,7 +3820,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- uint16_t *latencies;
+ u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -3982,7 +3835,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
{
struct seq_file *m = file->private_data;
struct drm_i915_private *dev_priv = m->private;
- uint16_t *latencies;
+ u16 *latencies;
if (INTEL_GEN(dev_priv) >= 9)
latencies = dev_priv->wm.skl_latency;
@@ -4184,11 +4037,12 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ intel_wakeref_t wakeref;
int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE && !intel_engines_are_idle(i915))
i915_gem_set_wedged(i915);
@@ -4206,9 +4060,6 @@ i915_drop_caches_set(void *data, u64 val)
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (ret == 0 && val & DROP_RESET_SEQNO)
- ret = i915_gem_set_global_seqno(&i915->drm, 1);
-
if (val & DROP_RETIRE)
i915_retire_requests(i915);
@@ -4246,7 +4097,7 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_drain_freed_objects(i915);
out:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return ret;
}
@@ -4259,16 +4110,14 @@ static int
i915_cache_sharing_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
- u32 snpcr;
+ intel_wakeref_t wakeref;
+ u32 snpcr = 0;
- if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+ if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
-
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
-
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
*val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
@@ -4279,24 +4128,25 @@ static int
i915_cache_sharing_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
- u32 snpcr;
+ intel_wakeref_t wakeref;
- if (!(IS_GEN6(dev_priv) || IS_GEN7(dev_priv)))
+ if (!(IS_GEN_RANGE(dev_priv, 6, 7)))
return -ENODEV;
if (val > 3)
return -EINVAL;
- intel_runtime_pm_get(dev_priv);
DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ u32 snpcr;
+
+ /* Update the cache sharing policy here as well */
+ snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
+ snpcr &= ~GEN6_MBC_SNPCR_MASK;
+ snpcr |= val << GEN6_MBC_SNPCR_SHIFT;
+ I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
+ }
- /* Update the cache sharing policy here as well */
- snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
- snpcr &= ~GEN6_MBC_SNPCR_MASK;
- snpcr |= (val << GEN6_MBC_SNPCR_SHIFT);
- I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
-
- intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -4341,7 +4191,7 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
#define SS_MAX 6
- const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
@@ -4397,7 +4247,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
struct sseu_dev_info *sseu)
{
#define SS_MAX 3
- const struct intel_device_info *info = INTEL_INFO(dev_priv);
+ const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv);
u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
int s, ss;
@@ -4425,7 +4275,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
if (IS_GEN9_BC(dev_priv))
sseu->subslice_mask[s] =
- INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+ RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
for (ss = 0; ss < info->sseu.max_subslices; ss++) {
unsigned int eu_cnt;
@@ -4459,10 +4309,10 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
if (sseu->slice_mask) {
sseu->eu_per_subslice =
- INTEL_INFO(dev_priv)->sseu.eu_per_subslice;
+ RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice;
for (s = 0; s < fls(sseu->slice_mask); s++) {
sseu->subslice_mask[s] =
- INTEL_INFO(dev_priv)->sseu.subslice_mask[s];
+ RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s];
}
sseu->eu_total = sseu->eu_per_subslice *
sseu_subslice_total(sseu);
@@ -4470,7 +4320,7 @@ static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv,
/* subtract fused off EU(s) from enabled slice(s) */
for (s = 0; s < fls(sseu->slice_mask); s++) {
u8 subslice_7eu =
- INTEL_INFO(dev_priv)->sseu.subslice_7eu[s];
+ RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s];
sseu->eu_total -= hweight8(subslice_7eu);
}
@@ -4518,34 +4368,32 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct sseu_dev_info sseu;
+ intel_wakeref_t wakeref;
if (INTEL_GEN(dev_priv) < 8)
return -ENODEV;
seq_puts(m, "SSEU Device Info\n");
- i915_print_sseu_info(m, true, &INTEL_INFO(dev_priv)->sseu);
+ i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu);
seq_puts(m, "SSEU Device Status\n");
memset(&sseu, 0, sizeof(sseu));
- sseu.max_slices = INTEL_INFO(dev_priv)->sseu.max_slices;
- sseu.max_subslices = INTEL_INFO(dev_priv)->sseu.max_subslices;
+ sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices;
+ sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices;
sseu.max_eus_per_subslice =
- INTEL_INFO(dev_priv)->sseu.max_eus_per_subslice;
-
- intel_runtime_pm_get(dev_priv);
-
- if (IS_CHERRYVIEW(dev_priv)) {
- cherryview_sseu_device_status(dev_priv, &sseu);
- } else if (IS_BROADWELL(dev_priv)) {
- broadwell_sseu_device_status(dev_priv, &sseu);
- } else if (IS_GEN9(dev_priv)) {
- gen9_sseu_device_status(dev_priv, &sseu);
- } else if (INTEL_GEN(dev_priv) >= 10) {
- gen10_sseu_device_status(dev_priv, &sseu);
+ RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice;
+
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (IS_CHERRYVIEW(dev_priv))
+ cherryview_sseu_device_status(dev_priv, &sseu);
+ else if (IS_BROADWELL(dev_priv))
+ broadwell_sseu_device_status(dev_priv, &sseu);
+ else if (IS_GEN(dev_priv, 9))
+ gen9_sseu_device_status(dev_priv, &sseu);
+ else if (INTEL_GEN(dev_priv) >= 10)
+ gen10_sseu_device_status(dev_priv, &sseu);
}
- intel_runtime_pm_put(dev_priv);
-
i915_print_sseu_info(m, false, &sseu);
return 0;
@@ -4558,7 +4406,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
- intel_runtime_pm_get(i915);
+ file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
intel_uncore_forcewake_user_get(i915);
return 0;
@@ -4572,7 +4420,8 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
return 0;
intel_uncore_forcewake_user_put(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915,
+ (intel_wakeref_t)(uintptr_t)file->private_data);
return 0;
}
@@ -4899,7 +4748,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_context_status", i915_context_status, 0},
{"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
- {"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_llc", i915_llc, 0},
{"i915_edp_psr_status", i915_edp_psr_status, 0},
{"i915_energy_uJ", i915_energy_uJ, 0},
@@ -4934,7 +4782,6 @@ static const struct i915_debugfs_files {
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
{"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_next_seqno", &i915_next_seqno_fops},
{"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
@@ -5007,7 +4854,7 @@ static int i915_dpcd_show(struct seq_file *m, void *data)
struct drm_connector *connector = m->private;
struct intel_dp *intel_dp =
enc_to_intel_dp(&intel_attached_encoder(connector)->base);
- uint8_t buf[16];
+ u8 buf[16];
ssize_t err;
int i;
@@ -5081,6 +4928,105 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ struct drm_modeset_acquire_ctx ctx;
+ struct intel_crtc_state *crtc_state = NULL;
+ int ret = 0;
+ bool try_again = false;
+
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+ do {
+ try_again = false;
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &ctx);
+ if (ret) {
+ ret = -EINTR;
+ break;
+ }
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ break;
+ }
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret) {
+ try_again = true;
+ continue;
+ }
+ break;
+ } else if (ret) {
+ break;
+ }
+ intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "DSC_Enabled: %s\n",
+ yesno(crtc_state->dsc_params.compression_enable));
+ seq_printf(m, "DSC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ if (!intel_dp_is_edp(intel_dp))
+ seq_printf(m, "FEC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+ } while (try_again);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ bool dsc_enable = false;
+ int ret;
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (len == 0)
+ return 0;
+
+ DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
+ len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+ if (ret < 0)
+ return ret;
+
+ DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
+ (dsc_enable) ? "true" : "false");
+ intel_dp->force_dsc_en = dsc_enable;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fec_support_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fec_support_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fec_support_write
+};
+
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -5093,6 +5039,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
int i915_debugfs_connector_add(struct drm_connector *connector)
{
struct dentry *root = connector->debugfs_entry;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
	/* The connector must have been registered beforehand. */
if (!root)
@@ -5117,5 +5064,11 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
connector, &i915_hdcp_sink_capability_fops);
}
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+ debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+ connector, &i915_dsc_fec_support_fops);
+
return 0;
}
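
The new per-connector i915_dsc_fec_support node added above reports DSC and FEC state for DP/eDP connectors on gen10+, and writing a boolean to it sets intel_dp->force_dsc_en for testing. Its show path uses the usual drm_modeset_acquire_ctx retry dance; a small standalone sketch of that pattern (the function name is made up, the drm_modeset_* calls are the real API):

	static int example_show_locked(struct drm_connector *connector)
	{
		struct drm_modeset_acquire_ctx ctx;
		int ret;

		drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

		while ((ret = drm_modeset_lock(&connector->dev->mode_config.connection_mutex,
					       &ctx)) == -EDEADLK) {
			/* Contended against another acquire context: back off and retry. */
			ret = drm_modeset_backoff(&ctx);
			if (ret)
				break;
		}

		if (!ret) {
			/* ... inspect connector->state under the lock ... */
			drm_modeset_drop_locks(&ctx);
		}

		drm_modeset_acquire_fini(&ctx);
		return ret;
	}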
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b310a897a4ad..f462a4d28af4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -41,7 +41,6 @@
#include <linux/vt.h>
#include <acpi/video.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>
@@ -49,6 +48,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_pmu.h"
+#include "i915_reset.h"
#include "i915_query.h"
#include "i915_vgpu.h"
#include "intel_drv.h"
@@ -132,15 +132,15 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN5(dev_priv));
+ WARN_ON(!IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv));
+ WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
@@ -217,9 +217,9 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
- else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
@@ -349,7 +349,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = HAS_LEGACY_SEMAPHORES(dev_priv);
+ value = 0;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -358,12 +358,12 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = i915_cmd_parser_get_version(dev_priv);
break;
case I915_PARAM_SUBSLICE_TOTAL:
- value = sseu_subslice_total(&INTEL_INFO(dev_priv)->sseu);
+ value = sseu_subslice_total(&RUNTIME_INFO(dev_priv)->sseu);
if (!value)
return -ENODEV;
break;
case I915_PARAM_EU_TOTAL:
- value = INTEL_INFO(dev_priv)->sseu.eu_total;
+ value = RUNTIME_INFO(dev_priv)->sseu.eu_total;
if (!value)
return -ENODEV;
break;
@@ -380,7 +380,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_POOLED_EU(dev_priv);
break;
case I915_PARAM_MIN_EU_IN_POOL:
- value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
+ value = RUNTIME_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
value = intel_huc_check_status(&dev_priv->huc);
@@ -430,17 +430,17 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = intel_engines_has_context_isolation(dev_priv);
break;
case I915_PARAM_SLICE_MASK:
- value = INTEL_INFO(dev_priv)->sseu.slice_mask;
+ value = RUNTIME_INFO(dev_priv)->sseu.slice_mask;
if (!value)
return -ENODEV;
break;
case I915_PARAM_SUBSLICE_MASK:
- value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
+ value = RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0];
if (!value)
return -ENODEV;
break;
case I915_PARAM_CS_TIMESTAMP_FREQUENCY:
- value = 1000 * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz;
+ value = 1000 * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz;
break;
case I915_PARAM_MMAP_GTT_COHERENT:
value = INTEL_INFO(dev_priv)->has_coherent_ggtt;
@@ -906,6 +906,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->pps_mutex);
i915_memcpy_init_early(dev_priv);
+ intel_runtime_pm_init_early(dev_priv);
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
@@ -966,7 +967,7 @@ static int i915_mmio_setup(struct drm_i915_private *dev_priv)
int mmio_bar;
int mmio_size;
- mmio_bar = IS_GEN2(dev_priv) ? 1 : 0;
+ mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
/*
* Before gen4, the registers and the GTT are behind different BARs.
* However, from gen4 onwards, the registers and the GTT are shared
@@ -1341,7 +1342,7 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
/* Need to calculate bandwidth only for Gen9 */
if (IS_BROXTON(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (IS_GEN9(dev_priv))
+ else if (IS_GEN(dev_priv, 9))
ret = skl_get_dram_info(dev_priv);
else
ret = skl_dram_get_channels_info(dev_priv);
@@ -1374,7 +1375,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
- intel_device_info_runtime_init(mkwrite_device_info(dev_priv));
+ intel_device_info_runtime_init(dev_priv);
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
@@ -1436,7 +1437,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
/* overlay on gen2 is broken and can't address above 1G */
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
DRM_ERROR("failed to set DMA mask\n");
@@ -1574,7 +1575,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
acpi_video_register();
}
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
intel_gpu_ips_init(dev_priv);
intel_audio_init(dev_priv);
@@ -1636,8 +1637,14 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
- intel_device_info_dump(&dev_priv->info, &p);
- intel_device_info_dump_runtime(&dev_priv->info, &p);
+ drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(INTEL_INFO(dev_priv)->platform),
+ INTEL_GEN(dev_priv));
+
+ intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
+ intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
@@ -1674,7 +1681,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Setup the write-once "constant" device info */
device_info = mkwrite_device_info(i915);
memcpy(device_info, match_info, sizeof(*device_info));
- device_info->device_id = pdev->device;
+ RUNTIME_INFO(i915)->device_id = pdev->device;
BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
BITS_PER_TYPE(device_info->platform_mask));
@@ -1774,6 +1781,9 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
+ /* Flush any external code that may still be under the RCU lock */
+ synchronize_rcu();
+
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
@@ -1802,8 +1812,7 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_cleanup_mmio(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
-
- WARN_ON(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+ intel_runtime_pm_cleanup(dev_priv);
}
static void i915_driver_release(struct drm_device *dev)
@@ -2005,6 +2014,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
out:
enable_rpm_wakeref_asserts(dev_priv);
+ if (!dev_priv->uncore.user_forcewake.count)
+ intel_runtime_pm_cleanup(dev_priv);
return ret;
}
@@ -2174,7 +2185,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
intel_power_domains_resume(dev_priv);
- intel_engines_sanitize(dev_priv);
+ intel_engines_sanitize(dev_priv, true);
enable_rpm_wakeref_asserts(dev_priv);
@@ -2195,210 +2206,6 @@ static int i915_resume_switcheroo(struct drm_device *dev)
return i915_drm_resume(dev);
}
-/**
- * i915_reset - reset chip after a hang
- * @i915: #drm_i915_private to reset
- * @stalled_mask: mask of the stalled engines with the guilty requests
- * @reason: user error message for why we are resetting
- *
- * Reset the chip. Useful if a hang is detected. Marks the device as wedged
- * on failure.
- *
- * Caller must hold the struct_mutex.
- *
- * Procedure is fairly simple:
- * - reset the chip using the reset reg
- * - re-init context state
- * - re-init hardware status page
- * - re-init ring buffer
- * - re-init interrupt state
- * - re-init display
- */
-void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
- const char *reason)
-{
- struct i915_gpu_error *error = &i915->gpu_error;
- int ret;
- int i;
-
- GEM_TRACE("flags=%lx\n", error->flags);
-
- might_sleep();
- lockdep_assert_held(&i915->drm.struct_mutex);
- GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
-
- if (!test_bit(I915_RESET_HANDOFF, &error->flags))
- return;
-
- /* Clear any previous failed attempts at recovery. Time to try again. */
- if (!i915_gem_unset_wedged(i915))
- goto wakeup;
-
- if (reason)
- dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
- error->reset_count++;
-
- ret = i915_gem_reset_prepare(i915);
- if (ret) {
- dev_err(i915->drm.dev, "GPU recovery failed\n");
- goto taint;
- }
-
- if (!intel_has_gpu_reset(i915)) {
- if (i915_modparams.reset)
- dev_err(i915->drm.dev, "GPU reset not supported\n");
- else
- DRM_DEBUG_DRIVER("GPU reset disabled\n");
- goto error;
- }
-
- for (i = 0; i < 3; i++) {
- ret = intel_gpu_reset(i915, ALL_ENGINES);
- if (ret == 0)
- break;
-
- msleep(100);
- }
- if (ret) {
- dev_err(i915->drm.dev, "Failed to reset chip\n");
- goto taint;
- }
-
- /* Ok, now get things going again... */
-
- /*
- * Everything depends on having the GTT running, so we need to start
- * there.
- */
- ret = i915_ggtt_enable_hw(i915);
- if (ret) {
- DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
- ret);
- goto error;
- }
-
- i915_gem_reset(i915, stalled_mask);
- intel_overlay_reset(i915);
-
- /*
- * Next we need to restore the context, but we don't use those
- * yet either...
- *
- * Ring buffer needs to be re-initialized in the KMS case, or if X
- * was running at the time of the reset (i.e. we weren't VT
- * switched away).
- */
- ret = i915_gem_init_hw(i915);
- if (ret) {
- DRM_ERROR("Failed to initialise HW following reset (%d)\n",
- ret);
- goto error;
- }
-
- i915_queue_hangcheck(i915);
-
-finish:
- i915_gem_reset_finish(i915);
-wakeup:
- clear_bit(I915_RESET_HANDOFF, &error->flags);
- wake_up_bit(&error->flags, I915_RESET_HANDOFF);
- return;
-
-taint:
- /*
- * History tells us that if we cannot reset the GPU now, we
- * never will. This then impacts everything that is run
- * subsequently. On failing the reset, we mark the driver
- * as wedged, preventing further execution on the GPU.
- * We also want to go one step further and add a taint to the
- * kernel so that any subsequent faults can be traced back to
- * this failure. This is important for CI, where if the
- * GPU/driver fails we would like to reboot and restart testing
- * rather than continue on into oblivion. For everyone else,
- * the system should still plod along, but they have been warned!
- */
- add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
-error:
- i915_gem_set_wedged(i915);
- i915_retire_requests(i915);
- goto finish;
-}
-
-static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- return intel_gpu_reset(dev_priv, intel_engine_flag(engine));
-}
-
-/**
- * i915_reset_engine - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no dev_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
-int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
-{
- struct i915_gpu_error *error = &engine->i915->gpu_error;
- struct i915_request *active_request;
- int ret;
-
- GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
- GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
-
- active_request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR_OR_NULL(active_request)) {
- /* Either the previous reset failed, or we pardon the reset. */
- ret = PTR_ERR(active_request);
- goto out;
- }
-
- if (msg)
- dev_notice(engine->i915->drm.dev,
- "Resetting %s for %s\n", engine->name, msg);
- error->reset_engine_count[engine->id]++;
-
- if (!engine->i915->guc.execbuf_client)
- ret = intel_gt_reset_engine(engine->i915, engine);
- else
- ret = intel_guc_reset_engine(&engine->i915->guc, engine);
- if (ret) {
- /* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- engine->i915->guc.execbuf_client ? "GuC " : "",
- engine->name, ret);
- goto out;
- }
-
- /*
- * The request that caused the hang is stuck on elsp, we know the
- * active request and can drop it, adjust head to skip the offending
- * request to resume executing remaining requests in the queue.
- */
- i915_gem_reset_engine(engine, active_request, true);
-
- /*
- * The engine and its registers (and workarounds in case of render)
- * have been reset to their default values. Follow the init_ring
- * process to program RING_MODE, HWSP and re-enable submission.
- */
- ret = engine->init_hw(engine);
- if (ret)
- goto out;
-
-out:
- intel_engine_cancel_stop_cs(engine);
- i915_gem_reset_finish_engine(engine);
- return ret;
-}
-
static int i915_pm_prepare(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
@@ -2959,7 +2766,7 @@ static int intel_runtime_suspend(struct device *kdev)
}
enable_rpm_wakeref_asserts(dev_priv);
- WARN_ON_ONCE(atomic_read(&dev_priv->runtime_pm.wakeref_count));
+ intel_runtime_pm_cleanup(dev_priv);
if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b1c31967194b..03db011caa8e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -45,8 +45,8 @@
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
+#include <linux/stackdepot.h>
-#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
@@ -54,6 +54,7 @@
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
+#include <drm/drm_connector.h>
#include "i915_fixed.h"
#include "i915_params.h"
@@ -90,8 +91,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20181204"
-#define DRIVER_TIMESTAMP 1543944377
+#define DRIVER_DATE "20190110"
+#define DRIVER_TIMESTAMP 1547162337
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -130,6 +131,8 @@ bool i915_error_injected(void);
__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
fmt, ##__VA_ARGS__)
+typedef depot_stack_handle_t intel_wakeref_t;
+
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -281,16 +284,14 @@ struct drm_i915_display_funcs {
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
- int (*compute_intermediate_wm)(struct drm_device *dev,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *newstate);
+ int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
void (*initial_watermarks)(struct intel_atomic_state *state,
struct intel_crtc_state *cstate);
void (*atomic_update_watermarks)(struct intel_atomic_state *state,
struct intel_crtc_state *cstate);
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc_state *cstate);
- int (*compute_global_watermarks)(struct drm_atomic_state *state);
+ int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
@@ -322,8 +323,8 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
- void (*load_csc_matrix)(struct drm_crtc_state *crtc_state);
- void (*load_luts)(struct drm_crtc_state *crtc_state);
+ void (*load_csc_matrix)(struct intel_crtc_state *crtc_state);
+ void (*load_luts)(struct intel_crtc_state *crtc_state);
};
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
@@ -333,16 +334,17 @@ struct drm_i915_display_funcs {
struct intel_csr {
struct work_struct work;
const char *fw_path;
- uint32_t required_version;
- uint32_t max_fw_size; /* bytes */
- uint32_t *dmc_payload;
- uint32_t dmc_fw_size; /* dwords */
- uint32_t version;
- uint32_t mmio_count;
+ u32 required_version;
+ u32 max_fw_size; /* bytes */
+ u32 *dmc_payload;
+ u32 dmc_fw_size; /* dwords */
+ u32 version;
+ u32 mmio_count;
i915_reg_t mmioaddr[8];
- uint32_t mmiodata[8];
- uint32_t dc_state;
- uint32_t allowed_dc_mask;
+ u32 mmiodata[8];
+ u32 dc_state;
+ u32 allowed_dc_mask;
+ intel_wakeref_t wakeref;
};
enum i915_cache_level {
@@ -398,7 +400,7 @@ struct intel_fbc {
struct {
unsigned int mode_flags;
- uint32_t hsw_bdw_pixel_rate;
+ u32 hsw_bdw_pixel_rate;
} crtc;
struct {
@@ -417,7 +419,7 @@ struct intel_fbc {
int y;
- uint16_t pixel_blend_mode;
+ u16 pixel_blend_mode;
} plane;
struct {
@@ -509,6 +511,7 @@ struct i915_psr {
ktime_t last_exit;
bool sink_not_reliable;
bool irq_aux_error;
+ u16 su_x_granularity;
};
enum intel_pch {
@@ -556,7 +559,7 @@ struct i915_suspend_saved_registers {
u32 saveSWF0[16];
u32 saveSWF1[16];
u32 saveSWF3[3];
- uint64_t saveFENCE[I915_MAX_NUM_FENCES];
+ u64 saveFENCE[I915_MAX_NUM_FENCES];
u32 savePCH_PORT_HOTPLUG;
u16 saveGCDGMBUS;
};
@@ -819,6 +822,8 @@ struct i915_power_domains {
bool display_core_suspended;
int power_well_count;
+ intel_wakeref_t wakeref;
+
struct mutex lock;
int domain_use_count[POWER_DOMAIN_NUM];
struct i915_power_well *power_wells;
@@ -901,9 +906,9 @@ struct i915_gem_mm {
atomic_t bsd_engine_dispatch_index;
/** Bit 6 swizzling required for X tiling */
- uint32_t bit_6_swizzle_x;
+ u32 bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
- uint32_t bit_6_swizzle_y;
+ u32 bit_6_swizzle_y;
/* accounting, useful for userland debugging */
spinlock_t object_stat_lock;
@@ -930,18 +935,20 @@ struct ddi_vbt_port_info {
* populate this field.
*/
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
- uint8_t hdmi_level_shift;
+ u8 hdmi_level_shift;
- uint8_t supports_dvi:1;
- uint8_t supports_hdmi:1;
- uint8_t supports_dp:1;
- uint8_t supports_edp:1;
+ u8 supports_dvi:1;
+ u8 supports_hdmi:1;
+ u8 supports_dp:1;
+ u8 supports_edp:1;
+ u8 supports_typec_usb:1;
+ u8 supports_tbt:1;
- uint8_t alternate_aux_channel;
- uint8_t alternate_ddc_pin;
+ u8 alternate_aux_channel;
+ u8 alternate_ddc_pin;
- uint8_t dp_boost_level;
- uint8_t hdmi_boost_level;
+ u8 dp_boost_level;
+ u8 hdmi_boost_level;
int dp_max_link_rate; /* 0 for not limited by VBT */
};
@@ -1032,41 +1039,41 @@ enum intel_ddb_partitioning {
struct intel_wm_level {
bool enable;
- uint32_t pri_val;
- uint32_t spr_val;
- uint32_t cur_val;
- uint32_t fbc_val;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
};
struct ilk_wm_values {
- uint32_t wm_pipe[3];
- uint32_t wm_lp[3];
- uint32_t wm_lp_spr[3];
- uint32_t wm_linetime[3];
+ u32 wm_pipe[3];
+ u32 wm_lp[3];
+ u32 wm_lp_spr[3];
+ u32 wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
struct g4x_pipe_wm {
- uint16_t plane[I915_MAX_PLANES];
- uint16_t fbc;
+ u16 plane[I915_MAX_PLANES];
+ u16 fbc;
};
struct g4x_sr_wm {
- uint16_t plane;
- uint16_t cursor;
- uint16_t fbc;
+ u16 plane;
+ u16 cursor;
+ u16 fbc;
};
struct vlv_wm_ddl_values {
- uint8_t plane[I915_MAX_PLANES];
+ u8 plane[I915_MAX_PLANES];
};
struct vlv_wm_values {
struct g4x_pipe_wm pipe[3];
struct g4x_sr_wm sr;
struct vlv_wm_ddl_values ddl[3];
- uint8_t level;
+ u8 level;
bool cxsr;
};
@@ -1080,10 +1087,10 @@ struct g4x_wm_values {
};
struct skl_ddb_entry {
- uint16_t start, end; /* in number of blocks, 'end' is exclusive */
+ u16 start, end; /* in number of blocks, 'end' is exclusive */
};
-static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
+static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
return entry->end - entry->start;
}
@@ -1107,8 +1114,8 @@ struct skl_ddb_values {
};
struct skl_wm_level {
- uint16_t plane_res_b;
- uint8_t plane_res_l;
+ u16 plane_res_b;
+ u8 plane_res_l;
bool plane_en;
};
@@ -1117,15 +1124,15 @@ struct skl_wm_params {
bool x_tiled, y_tiled;
bool rc_surface;
bool is_planar;
- uint32_t width;
- uint8_t cpp;
- uint32_t plane_pixel_rate;
- uint32_t y_min_scanlines;
- uint32_t plane_bytes_per_line;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
uint_fixed_16_16_t plane_blocks_per_line;
uint_fixed_16_16_t y_tile_minimum;
- uint32_t linetime_us;
- uint32_t dbuf_block_size;
+ u32 linetime_us;
+ u32 dbuf_block_size;
};
/*
@@ -1155,6 +1162,25 @@ struct i915_runtime_pm {
atomic_t wakeref_count;
bool suspended;
bool irqs_enabled;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+ /*
+ * To aid detection of wakeref leaks and general misuse, we
+ * track all wakeref holders. With manual markup (i.e. returning
+ * a cookie to each rpm_get caller which they then supply to their
+ * paired rpm_put) we can remove corresponding pairs and keep
+ * the array trimmed to active wakerefs.
+ */
+ struct intel_runtime_pm_debug {
+ spinlock_t lock;
+
+ depot_stack_handle_t last_acquire;
+ depot_stack_handle_t last_release;
+
+ depot_stack_handle_t *owners;
+ unsigned long count;
+ } debug;
+#endif
};
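
The debug block above is only built with CONFIG_DRM_I915_DEBUG_RUNTIME_PM and records one stack-depot handle per outstanding wakeref, so a leaked reference can be traced back to its acquirer; intel_wakeref_t itself is a typedef of the same depot_stack_handle_t. A rough sketch of how such a handle could be captured with the stack-depot API of this kernel era (save_stack_trace()/depot_save_stack()); this illustrates the idea and is not the driver's actual helper:

	static depot_stack_handle_t example_capture_wakeref_stack(void)
	{
		unsigned long entries[32];
		struct stack_trace trace = {
			.entries = entries,
			.max_entries = ARRAY_SIZE(entries),
			.skip = 1,	/* skip this helper itself */
		};

		save_stack_trace(&trace);
		return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
	}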
enum intel_pipe_crc_source {
@@ -1311,6 +1337,12 @@ struct i915_perf_stream {
struct list_head link;
/**
+ * @wakeref: As we keep the device awake while the perf stream is
+ * active, we track our runtime pm reference for later release.
+ */
+ intel_wakeref_t wakeref;
+
+ /**
* @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
* properties given when opening a stream, representing the contents
* of a single sample as read() by userspace.
@@ -1430,7 +1462,8 @@ struct drm_i915_private {
struct kmem_cache *dependencies;
struct kmem_cache *priorities;
- const struct intel_device_info info;
+ const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
+ struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
/**
@@ -1482,14 +1515,14 @@ struct drm_i915_private {
* Base address of where the gmbus and gpio blocks are located (either
* on PCH or on SoC for platforms without PCH).
*/
- uint32_t gpio_mmio_base;
+ u32 gpio_mmio_base;
/* MMIO base address for MIPI regs */
- uint32_t mipi_mmio_base;
+ u32 mipi_mmio_base;
- uint32_t psr_mmio_base;
+ u32 psr_mmio_base;
- uint32_t pps_mmio_base;
+ u32 pps_mmio_base;
wait_queue_head_t gmbus_wait_queue;
@@ -1744,17 +1777,17 @@ struct drm_i915_private {
* in 0.5us units for WM1+.
*/
/* primary */
- uint16_t pri_latency[5];
+ u16 pri_latency[5];
/* sprite */
- uint16_t spr_latency[5];
+ u16 spr_latency[5];
/* cursor */
- uint16_t cur_latency[5];
+ u16 cur_latency[5];
/*
* Raw watermark memory latency values
* for SKL for all 8 levels
* in 1us units.
*/
- uint16_t skl_latency[8];
+ u16 skl_latency[8];
/* current hardware state */
union {
@@ -1764,7 +1797,7 @@ struct drm_i915_private {
struct g4x_wm_values g4x;
};
- uint8_t max_level;
+ u8 max_level;
/*
* Should be held around atomic WM register writing; also
@@ -1947,7 +1980,6 @@ struct drm_i915_private {
struct list_head active_rings;
struct list_head closed_vma;
u32 active_requests;
- u32 request_serial;
/**
* Is the GPU currently considered idle, or busy executing
@@ -1956,7 +1988,7 @@ struct drm_i915_private {
* In order to reduce the effect on performance, there
* is a slight delay before we do so.
*/
- bool awake;
+ intel_wakeref_t awake;
/**
* The number of times we have woken up.
@@ -2191,17 +2223,12 @@ static inline unsigned int i915_sg_segment_size(void)
return size;
}
-static inline const struct intel_device_info *
-intel_info(const struct drm_i915_private *dev_priv)
-{
- return &dev_priv->info;
-}
-
-#define INTEL_INFO(dev_priv) intel_info((dev_priv))
+#define INTEL_INFO(dev_priv) (&(dev_priv)->__info)
+#define RUNTIME_INFO(dev_priv) (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv) (&(dev_priv)->caps)
-#define INTEL_GEN(dev_priv) ((dev_priv)->info.gen)
-#define INTEL_DEVID(dev_priv) ((dev_priv)->info.device_id)
+#define INTEL_GEN(dev_priv) (INTEL_INFO(dev_priv)->gen)
+#define INTEL_DEVID(dev_priv) (RUNTIME_INFO(dev_priv)->device_id)
#define REVID_FOREVER 0xff
#define INTEL_REVID(dev_priv) ((dev_priv)->drm.pdev->revision)
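
With the accessors above, device information is split in two: INTEL_INFO() points at the write-once intel_device_info copied from the PCI ID table, while RUNTIME_INFO() holds values that are probed or fused off and filled in at load time (device id, SSEU topology, timestamp frequency and so on). A small illustrative usage, with the field choice only as an example:

	static void example_info_split(struct drm_i915_private *i915)
	{
		/* Static capability, fixed per PCI ID: */
		if (!HAS_LLC(i915))
			return;

		/* Runtime-probed ids and topology: */
		DRM_DEBUG_DRIVER("device 0x%04x, slice mask 0x%x\n",
				 RUNTIME_INFO(i915)->device_id,
				 RUNTIME_INFO(i915)->sseu.slice_mask);
	}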
@@ -2212,8 +2239,12 @@ intel_info(const struct drm_i915_private *dev_priv)
GENMASK((e) - 1, (s) - 1))
/* Returns true if Gen is in inclusive range [Start, End] */
-#define IS_GEN(dev_priv, s, e) \
- (!!((dev_priv)->info.gen_mask & INTEL_GEN_MASK((s), (e))))
+#define IS_GEN_RANGE(dev_priv, s, e) \
+ (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))
+
+#define IS_GEN(dev_priv, n) \
+ (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
+ INTEL_INFO(dev_priv)->gen == (n))
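
IS_GEN() now takes the generation as a second argument and, via BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)), refuses to compile unless that argument is a compile-time constant; range checks use the renamed IS_GEN_RANGE(). A trivial sketch of the intended usage (the workaround predicate is made up):

	static bool example_needs_wa(struct drm_i915_private *i915)
	{
		if (IS_GEN(i915, 9))			/* single generation, literal only */
			return true;

		return IS_GEN_RANGE(i915, 5, 7);	/* inclusive range check */
	}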
/*
* Return true if revision is in range [since,until] inclusive.
@@ -2223,7 +2254,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_PLATFORM(dev_priv, p) ((dev_priv)->info.platform_mask & BIT(p))
+#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
@@ -2245,7 +2276,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
- (dev_priv)->info.gt == 1)
+ INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv) IS_PLATFORM(dev_priv, INTEL_HASWELL)
@@ -2257,7 +2288,7 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv) ((dev_priv)->info.is_mobile)
+#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
@@ -2268,11 +2299,13 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xf) == 0xe)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
+#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
+ INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
INTEL_DEVID(dev_priv) == 0x0A1E)
@@ -2295,21 +2328,21 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
INTEL_DEVID(dev_priv) == 0x87C0)
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 2)
+ INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 4)
+ INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 2)
+ INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- (dev_priv)->info.gt == 2)
+ INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- (dev_priv)->info.gt == 3)
+ INTEL_INFO(dev_priv)->gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
@@ -2366,26 +2399,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_ICL_REVID(p, since, until) \
(IS_ICELAKE(p) && IS_REVID(p, since, until))
-/*
- * The genX designation typically refers to the render engine, so render
- * capability related checks should use IS_GEN, while display and other checks
- * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
- * chips, etc.).
- */
-#define IS_GEN2(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(1)))
-#define IS_GEN3(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(2)))
-#define IS_GEN4(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(3)))
-#define IS_GEN5(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(4)))
-#define IS_GEN6(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(5)))
-#define IS_GEN7(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(6)))
-#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
-#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN10(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(9)))
-#define IS_GEN11(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(10)))
-
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
-#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
-#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
+#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2399,29 +2415,27 @@ intel_info(const struct drm_i915_private *dev_priv)
#define ALL_ENGINES (~0)
#define HAS_ENGINE(dev_priv, id) \
- (!!((dev_priv)->info.ring_mask & ENGINE_MASK(id)))
+ (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
-#define HAS_LEGACY_SEMAPHORES(dev_priv) IS_GEN7(dev_priv)
-
-#define HAS_LLC(dev_priv) ((dev_priv)->info.has_llc)
-#define HAS_SNOOP(dev_priv) ((dev_priv)->info.has_snoop)
+#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
+#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
-#define HWS_NEEDS_PHYSICAL(dev_priv) ((dev_priv)->info.hws_needs_physical)
+#define HWS_NEEDS_PHYSICAL(dev_priv) (INTEL_INFO(dev_priv)->hws_needs_physical)
#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
- ((dev_priv)->info.has_logical_ring_contexts)
+ (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
- ((dev_priv)->info.has_logical_ring_elsq)
+ (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
- ((dev_priv)->info.has_logical_ring_preemption)
+ (INTEL_INFO(dev_priv)->has_logical_ring_preemption)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
@@ -2435,12 +2449,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
- ((sizes) & ~(dev_priv)->info.page_sizes) == 0; \
+ ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
-#define HAS_OVERLAY(dev_priv) ((dev_priv)->info.display.has_overlay)
+#define HAS_OVERLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
- ((dev_priv)->info.display.overlay_needs_physical)
+ (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv) (IS_I830(dev_priv) || IS_I845G(dev_priv))
@@ -2458,42 +2472,42 @@ intel_info(const struct drm_i915_private *dev_priv)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
-#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN2(dev_priv) && \
+#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
!(IS_I915G(dev_priv) || \
IS_I915GM(dev_priv)))
-#define SUPPORTS_TV(dev_priv) ((dev_priv)->info.display.supports_tv)
-#define I915_HAS_HOTPLUG(dev_priv) ((dev_priv)->info.display.has_hotplug)
+#define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv)
+#define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug)
#define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2)
-#define HAS_FBC(dev_priv) ((dev_priv)->info.display.has_fbc)
+#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc)
#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7)
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
-#define HAS_DP_MST(dev_priv) ((dev_priv)->info.display.has_dp_mst)
+#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
-#define HAS_DDI(dev_priv) ((dev_priv)->info.display.has_ddi)
-#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) ((dev_priv)->info.has_fpga_dbg)
-#define HAS_PSR(dev_priv) ((dev_priv)->info.display.has_psr)
+#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
+#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
+#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
-#define HAS_RC6(dev_priv) ((dev_priv)->info.has_rc6)
-#define HAS_RC6p(dev_priv) ((dev_priv)->info.has_rc6p)
+#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
+#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv) (false) /* HW was never validated */
-#define HAS_CSR(dev_priv) ((dev_priv)->info.display.has_csr)
+#define HAS_CSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_csr)
-#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
-#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
+#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
-#define HAS_IPC(dev_priv) ((dev_priv)->info.display.has_ipc)
+#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
* properties, so we have separate macros to test them.
*/
-#define HAS_GUC(dev_priv) ((dev_priv)->info.has_guc)
-#define HAS_GUC_CT(dev_priv) ((dev_priv)->info.has_guc_ct)
+#define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
+#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct)
#define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))
@@ -2502,11 +2516,11 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_HUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
/* Having a GuC is not the same as using a GuC */
-#define USES_GUC(dev_priv) intel_uc_is_using_guc()
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission()
-#define USES_HUC(dev_priv) intel_uc_is_using_huc()
+#define USES_GUC(dev_priv) intel_uc_is_using_guc(dev_priv)
+#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv)
+#define USES_HUC(dev_priv) intel_uc_is_using_huc(dev_priv)
-#define HAS_POOLED_EU(dev_priv) ((dev_priv)->info.has_pooled_eu)
+#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
#define INTEL_PCH_DEVICE_ID_MASK 0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2546,12 +2560,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)
-#define HAS_GMCH_DISPLAY(dev_priv) ((dev_priv)->info.display.has_gmch_display)
+#define HAS_GMCH_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch_display)
#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)
/* DPF == dynamic parity feature */
-#define HAS_L3_DPF(dev_priv) ((dev_priv)->info.has_l3_dpf)
+#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2 : HAS_L3_DPF(dev_priv))
@@ -2601,19 +2615,7 @@ extern const struct dev_pm_ops i915_pm_ops;
extern int i915_driver_load(struct pci_dev *pdev,
const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);
-extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
-extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
-
-extern void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
- const char *reason);
-extern int i915_reset_engine(struct intel_engine_cs *engine,
- const char *reason);
-
-extern bool intel_has_reset_engine(struct drm_i915_private *dev_priv);
-extern int intel_reset_guc(struct drm_i915_private *dev_priv);
-extern int intel_guc_reset_engine(struct intel_guc *guc,
- struct intel_engine_cs *engine);
+
extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2656,20 +2658,11 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
&dev_priv->gpu_error.hangcheck_work, delay);
}
-__printf(4, 5)
-void i915_handle_error(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- unsigned long flags,
- const char *fmt, ...);
-#define I915_ERROR_CAPTURE BIT(0)
-
extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
-void i915_clear_error_registers(struct drm_i915_private *dev_priv);
-
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
return dev_priv->gvt;
@@ -2693,45 +2686,45 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t mask,
- uint32_t bits);
+ u32 mask,
+ u32 bits);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask);
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits)
+ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask);
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, uint32_t bits)
+ enum pipe pipe, u32 bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, uint32_t bits)
+ enum pipe pipe, u32 bits)
{
bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask);
+ u32 interrupt_mask,
+ u32 enabled_irq_mask);
static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
+ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
ibx_display_interrupt_update(dev_priv, bits, 0);
}
@@ -2916,13 +2909,13 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_unpin_pages(obj);
}
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock */
+enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
I915_MM_NORMAL = 0,
- I915_MM_SHRINKER
+ I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque code */
};
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass);
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
enum i915_map_type {
@@ -2991,7 +2984,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+ u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);
void i915_gem_track_fb(struct drm_i915_gem_object *old,
@@ -3034,18 +3027,8 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
return READ_ONCE(error->reset_engine_count[engine->id]);
}
-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
-void i915_gem_reset(struct drm_i915_private *dev_priv,
- unsigned int stalled_mask);
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine);
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct i915_request *request,
- bool stalled);
void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -3142,7 +3125,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
struct i915_gem_context *ctx,
- uint32_t *reg_state);
+ u32 *reg_state);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
@@ -3204,7 +3187,8 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex);
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+ struct mutex *mutex);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -3313,7 +3297,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
- return (struct intel_device_info *)&dev_priv->info;
+ return (struct intel_device_info *)INTEL_INFO(dev_priv);
}
/* modesetting */
@@ -3393,10 +3377,10 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy);
-uint8_t bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count);
+u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_lat_optim_mask);
-uint8_t bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
+ u8 lane_lat_optim_mask);
+u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
void chv_set_phy_signal_level(struct intel_encoder *encoder,
u32 deemph_reg_value, u32 margin_reg_value,
@@ -3599,90 +3583,6 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
}
-static inline bool
-__i915_request_irq_complete(const struct i915_request *rq)
-{
- struct intel_engine_cs *engine = rq->engine;
- u32 seqno;
-
- /* Note that the engine may have wrapped around the seqno, and
- * so our request->global_seqno will be ahead of the hardware,
- * even though it completed the request before wrapping. We catch
- * this by kicking all the waiters before resetting the seqno
- * in hardware, and also signal the fence.
- */
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
- return true;
-
- /* The request was dequeued before we were awoken. We check after
- * inspecting the hw to confirm that this was the same request
- * that generated the HWS update. The memory barriers within
- * the request execution are sufficient to ensure that a check
- * after reading the value from hw matches this request.
- */
- seqno = i915_request_global_seqno(rq);
- if (!seqno)
- return false;
-
- /* Before we do the heavier coherent read of the seqno,
- * check the value (hopefully) in the CPU cacheline.
- */
- if (__i915_request_completed(rq, seqno))
- return true;
-
- /* Ensure our read of the seqno is coherent so that we
- * do not "miss an interrupt" (i.e. if this is the last
- * request and the seqno write from the GPU is not visible
- * by the time the interrupt fires, we will see that the
- * request is incomplete and go back to sleep awaiting
- * another interrupt that will never come.)
- *
- * Strictly, we only need to do this once after an interrupt,
- * but it is easier and safer to do it every time the waiter
- * is woken.
- */
- if (engine->irq_seqno_barrier &&
- test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
-
- /* The ordering of irq_posted versus applying the barrier
- * is crucial. The clearing of the current irq_posted must
- * be visible before we perform the barrier operation,
- * such that if a subsequent interrupt arrives, irq_posted
- * is reasserted and our task rewoken (which causes us to
- * do another __i915_request_irq_complete() immediately
- * and reapply the barrier). Conversely, if the clear
- * occurs after the barrier, then an interrupt that arrived
- * whilst we waited on the barrier would not trigger a
- * barrier on the next pass, and the read may not see the
- * seqno update.
- */
- engine->irq_seqno_barrier(engine);
-
- /* If we consume the irq, but we are no longer the bottom-half,
- * the real bottom-half may not have serialised their own
- * seqno check with the irq-barrier (i.e. may have inspected
- * the seqno before we believe it coherent since they see
- * irq_posted == false but we are still running).
- */
- spin_lock_irq(&b->irq_lock);
- if (b->irq_wait && b->irq_wait->tsk != current)
- /* Note that if the bottom-half is changed as we
- * are sending the wake-up, the new bottom-half will
- * be woken by whomever made the change. We only have
- * to worry about when we steal the irq-posted for
- * ourself.
- */
- wake_up_process(b->irq_wait->tsk);
- spin_unlock_irq(&b->irq_lock);
-
- if (__i915_request_completed(rq, seqno))
- return true;
- }
-
- return false;
-}
-
void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7399ac7a5629..b359390ba22c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -25,18 +25,8 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_vgpu.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
-#include "intel_mocs.h"
-#include "intel_workarounds.h"
-#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
@@ -47,6 +37,18 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_gemfs.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
+
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_mocs.h"
+#include "intel_workarounds.h"
+
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -139,6 +141,8 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
+ intel_wakeref_t wakeref;
+
GEM_TRACE("\n");
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -169,14 +173,13 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
i915_pmu_gt_parked(i915);
i915_vma_parked(i915);
- i915->gt.awake = false;
+ wakeref = fetch_and_zero(&i915->gt.awake);
+ GEM_BUG_ON(!wakeref);
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
- intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);
-
- intel_runtime_pm_put(i915);
+ intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return i915->gt.epoch;
}
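
In the park/unpark hunks here, i915->gt.awake changes from a bool to the intel_wakeref_t returned by intel_display_power_get(POWER_DOMAIN_GT_IRQ), so the cookie itself doubles as the "GT is awake" flag and parking consumes it with fetch_and_zero(). A compressed sketch of that idiom, for illustration only:

	static void example_park_gt(struct drm_i915_private *i915)
	{
		/* The cookie doubles as the "GT is awake" flag. */
		intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);

		if (!wakeref)
			return;	/* already parked */

		intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
	}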
@@ -201,12 +204,11 @@ void i915_gem_unpark(struct drm_i915_private *i915)
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->gt.active_requests);
+ assert_rpm_wakelock_held(i915);
if (i915->gt.awake)
return;
- intel_runtime_pm_get_noresume(i915);
-
/*
* It seems that the DMC likes to transition between the DC states a lot
* when there are no connected displays (no active power domains) during
@@ -218,9 +220,9 @@ void i915_gem_unpark(struct drm_i915_private *i915)
* Work around it by grabbing a GT IRQ power domain whilst there is any
* GT activity, preventing any DC state transitions.
*/
- intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
+ GEM_BUG_ON(!i915->gt.awake);
- i915->gt.awake = true;
if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
i915->gt.epoch = 1;
@@ -711,8 +713,8 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
- uint64_t size,
- uint32_t *handle_p)
+ u64 size,
+ u32 *handle_p)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -783,6 +785,8 @@ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
+ intel_wakeref_t wakeref;
+
/*
* No actual flushing is required for the GTT write domain for reads
* from the GTT domain. Writes to it "immediately" go to main memory
@@ -809,13 +813,13 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
i915_gem_chipset_flush(dev_priv);
- intel_runtime_pm_get(dev_priv);
- spin_lock_irq(&dev_priv->uncore.lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
+ POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
- spin_unlock_irq(&dev_priv->uncore.lock);
- intel_runtime_pm_put(dev_priv);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ }
}
static void
@@ -859,58 +863,6 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
obj->write_domain = 0;
}
-static inline int
-__copy_to_user_swizzled(char __user *cpu_vaddr,
- const char *gpu_vaddr, int gpu_offset,
- int length)
-{
- int ret, cpu_offset = 0;
-
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- ret = __copy_to_user(cpu_vaddr + cpu_offset,
- gpu_vaddr + swizzled_gpu_offset,
- this_length);
- if (ret)
- return ret + length;
-
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- return 0;
-}
-
-static inline int
-__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
- const char __user *cpu_vaddr,
- int length)
-{
- int ret, cpu_offset = 0;
-
- while (length > 0) {
- int cacheline_end = ALIGN(gpu_offset + 1, 64);
- int this_length = min(cacheline_end - gpu_offset, length);
- int swizzled_gpu_offset = gpu_offset ^ 64;
-
- ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
- cpu_vaddr + cpu_offset,
- this_length);
- if (ret)
- return ret + length;
-
- cpu_offset += this_length;
- gpu_offset += this_length;
- length -= this_length;
- }
-
- return 0;
-}
-
/*
* Pins the specified object's pages and synchronizes the object with
* GPU accesses. Sets needs_clflush to non-zero if the caller should
@@ -1030,72 +982,23 @@ err_unpin:
return ret;
}
-static void
-shmem_clflush_swizzled_range(char *addr, unsigned long length,
- bool swizzled)
-{
- if (unlikely(swizzled)) {
- unsigned long start = (unsigned long) addr;
- unsigned long end = (unsigned long) addr + length;
-
- /* For swizzling simply ensure that we always flush both
- * channels. Lame, but simple and it works. Swizzled
- * pwrite/pread is far from a hotpath - current userspace
- * doesn't use it at all. */
- start = round_down(start, 128);
- end = round_up(end, 128);
-
- drm_clflush_virt_range((void *)start, end - start);
- } else {
- drm_clflush_virt_range(addr, length);
- }
-
-}
-
-/* Only difference to the fast-path function is that this can handle bit17
- * and uses non-atomic copy and kmap functions. */
static int
-shmem_pread_slow(struct page *page, int offset, int length,
- char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
+shmem_pread(struct page *page, int offset, int len, char __user *user_data,
+ bool needs_clflush)
{
char *vaddr;
int ret;
vaddr = kmap(page);
- if (needs_clflush)
- shmem_clflush_swizzled_range(vaddr + offset, length,
- page_do_bit17_swizzling);
- if (page_do_bit17_swizzling)
- ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
- else
- ret = __copy_to_user(user_data, vaddr + offset, length);
- kunmap(page);
-
- return ret ? - EFAULT : 0;
-}
-
-static int
-shmem_pread(struct page *page, int offset, int length, char __user *user_data,
- bool page_do_bit17_swizzling, bool needs_clflush)
-{
- int ret;
+ if (needs_clflush)
+ drm_clflush_virt_range(vaddr + offset, len);
- ret = -ENODEV;
- if (!page_do_bit17_swizzling) {
- char *vaddr = kmap_atomic(page);
+ ret = __copy_to_user(user_data, vaddr + offset, len);
- if (needs_clflush)
- drm_clflush_virt_range(vaddr + offset, length);
- ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
- kunmap_atomic(vaddr);
- }
- if (ret == 0)
- return 0;
+ kunmap(page);
- return shmem_pread_slow(page, offset, length, user_data,
- page_do_bit17_swizzling, needs_clflush);
+ return ret ? -EFAULT : 0;
}
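The simplified helper relies on the usual copy_{to,from}_user return convention: the copy routines return the number of bytes that could not be copied, which the helper collapses into -EFAULT. A small sketch of that convention, with hypothetical buffer names:

/* __copy_to_user() returns 0 on success, else the bytes left uncopied. */
static int example_copy_out(void __user *dst, const void *src, int len)
{
	unsigned long left = __copy_to_user(dst, src, len);

	return left ? -EFAULT : 0;
}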
static int
@@ -1104,15 +1007,10 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
{
char __user *user_data;
u64 remain;
- unsigned int obj_do_bit17_swizzling;
unsigned int needs_clflush;
unsigned int idx, offset;
int ret;
- obj_do_bit17_swizzling = 0;
- if (i915_gem_object_needs_bit17_swizzle(obj))
- obj_do_bit17_swizzling = BIT(17);
-
ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
if (ret)
return ret;
@@ -1130,7 +1028,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pread(page, offset, length, user_data,
- page_to_phys(page) & obj_do_bit17_swizzling,
needs_clflush);
if (ret)
break;
@@ -1174,6 +1071,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
+ intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
void __user *user_data;
@@ -1184,7 +1082,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
PIN_NONFAULT |
@@ -1257,7 +1155,7 @@ out_unpin:
i915_vma_unpin(vma);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return ret;
@@ -1358,6 +1256,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
+ intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
u64 remain, offset;
@@ -1376,13 +1275,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
* This easily dwarfs any performance advantage from
* using the cache bypass of indirect GGTT access.
*/
- if (!intel_runtime_pm_get_if_in_use(i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (!wakeref) {
ret = -EFAULT;
goto out_unlock;
}
} else {
/* No backing pages, no fallback, we must force GGTT access */
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -1464,39 +1364,12 @@ out_unpin:
i915_vma_unpin(vma);
}
out_rpm:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
return ret;
}
-static int
-shmem_pwrite_slow(struct page *page, int offset, int length,
- char __user *user_data,
- bool page_do_bit17_swizzling,
- bool needs_clflush_before,
- bool needs_clflush_after)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap(page);
- if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
- shmem_clflush_swizzled_range(vaddr + offset, length,
- page_do_bit17_swizzling);
- if (page_do_bit17_swizzling)
- ret = __copy_from_user_swizzled(vaddr, offset, user_data,
- length);
- else
- ret = __copy_from_user(vaddr + offset, user_data, length);
- if (needs_clflush_after)
- shmem_clflush_swizzled_range(vaddr + offset, length,
- page_do_bit17_swizzling);
- kunmap(page);
-
- return ret ? -EFAULT : 0;
-}
-
/* Per-page copy function for the shmem pwrite fastpath.
* Flushes invalid cachelines before writing to the target if
* needs_clflush_before is set and flushes out any written cachelines after
@@ -1504,31 +1377,24 @@ shmem_pwrite_slow(struct page *page, int offset, int length,
*/
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
- bool page_do_bit17_swizzling,
bool needs_clflush_before,
bool needs_clflush_after)
{
+ char *vaddr;
int ret;
- ret = -ENODEV;
- if (!page_do_bit17_swizzling) {
- char *vaddr = kmap_atomic(page);
+ vaddr = kmap(page);
- if (needs_clflush_before)
- drm_clflush_virt_range(vaddr + offset, len);
- ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
- if (needs_clflush_after)
- drm_clflush_virt_range(vaddr + offset, len);
+ if (needs_clflush_before)
+ drm_clflush_virt_range(vaddr + offset, len);
- kunmap_atomic(vaddr);
- }
- if (ret == 0)
- return ret;
+ ret = __copy_from_user(vaddr + offset, user_data, len);
+ if (!ret && needs_clflush_after)
+ drm_clflush_virt_range(vaddr + offset, len);
+
+ kunmap(page);
- return shmem_pwrite_slow(page, offset, len, user_data,
- page_do_bit17_swizzling,
- needs_clflush_before,
- needs_clflush_after);
+ return ret ? -EFAULT : 0;
}
static int
@@ -1538,7 +1404,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
void __user *user_data;
u64 remain;
- unsigned int obj_do_bit17_swizzling;
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
@@ -1553,10 +1418,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
if (ret)
return ret;
- obj_do_bit17_swizzling = 0;
- if (i915_gem_object_needs_bit17_swizzle(obj))
- obj_do_bit17_swizzling = BIT(17);
-
/* If we don't overwrite a cacheline completely we need to be
* careful to have up-to-date data by first clflushing. Don't
* overcomplicate things and flush the entire page.
@@ -1573,7 +1434,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pwrite(page, offset, length, user_data,
- page_to_phys(page) & obj_do_bit17_swizzling,
(offset | length) & partial_cacheline_write,
needs_clflush & CLFLUSH_AFTER);
if (ret)
@@ -1713,8 +1573,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_set_domain *args = data;
struct drm_i915_gem_object *obj;
- uint32_t read_domains = args->read_domains;
- uint32_t write_domain = args->write_domain;
+ u32 read_domains = args->read_domains;
+ u32 write_domain = args->write_domain;
int err;
/* Only handle setting domains to types used by the CPU. */
@@ -1896,7 +1756,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (IS_ERR((void *)addr))
return addr;
- args->addr_ptr = (uint64_t) addr;
+ args->addr_ptr = (u64)addr;
return 0;
}
@@ -2009,6 +1869,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
bool write = area->vm_flags & VM_WRITE;
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
int ret;
@@ -2038,7 +1899,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
if (ret)
goto err;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -2116,7 +1977,7 @@ err_unpin:
err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
i915_gem_object_unpin_pages(obj);
err:
switch (ret) {
@@ -2189,6 +2050,7 @@ void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
@@ -2199,7 +2061,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
* wakeref.
*/
lockdep_assert_held(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (!obj->userfault_count)
goto out;
@@ -2216,7 +2078,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
wmb();
out:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
@@ -2296,8 +2158,8 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
+ u32 handle,
+ u64 *offset)
{
struct drm_i915_gem_object *obj;
int ret;
@@ -2444,8 +2306,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
struct sg_table *pages;
pages = fetch_and_zero(&obj->mm.pages);
- if (!pages)
- return NULL;
+ if (IS_ERR_OR_NULL(pages))
+ return pages;
spin_lock(&i915->mm.obj_lock);
list_del(&obj->mm.link);
@@ -2469,22 +2331,23 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
return pages;
}
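fetch_and_zero() used above is an i915 utility macro defined elsewhere in the driver, not in this hunk; its presumed shape is a plain read-then-clear of the lvalue, which is why the result may now be an ERR_PTR as well as NULL and is filtered with IS_ERR_OR_NULL():

/* Presumed shape of the i915 helper; a read-then-clear, not atomic. */
#define example_fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);			\
	*(ptr) = (typeof(*(ptr)))0;			\
	__val;						\
})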
-void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
- enum i915_mm_subclass subclass)
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
+ enum i915_mm_subclass subclass)
{
struct sg_table *pages;
+ int ret;
if (i915_gem_object_has_pinned_pages(obj))
- return;
+ return -EBUSY;
GEM_BUG_ON(obj->bind_count);
- if (!i915_gem_object_has_pages(obj))
- return;
/* May be called by shrinker from within get_pages() (on another bo) */
mutex_lock_nested(&obj->mm.lock, subclass);
- if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
+ if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
+ ret = -EBUSY;
goto unlock;
+ }
/*
* ->put_pages might need to allocate memory for the bit17 swizzle
@@ -2492,11 +2355,24 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
* lists early.
*/
pages = __i915_gem_object_unset_pages(obj);
+
+ /*
+ * XXX Temporary hijinx to avoid updating all backends to handle
+ * NULL pages. In the future, when we have more asynchronous
+ * get_pages backends we should be better able to handle the
+ * cancellation of the async task in a more uniform manner.
+ */
+ if (!pages && !i915_gem_object_needs_async_cancel(obj))
+ pages = ERR_PTR(-EINVAL);
+
if (!IS_ERR(pages))
obj->ops->put_pages(obj, pages);
+ ret = 0;
unlock:
mutex_unlock(&obj->mm.lock);
+
+ return ret;
}
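With the new int return, callers of __i915_gem_object_put_pages() can distinguish pages that were actually released from pages that stayed pinned. A hypothetical caller, sketched against the signature introduced above:

static void example_try_release(struct drm_i915_gem_object *obj)
{
	int err;

	err = __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	if (err == -EBUSY) {
		/* The pages are pinned (or the object holds a pin); back off. */
		return;
	}

	/* err == 0: the backing store has been handed back to the backend. */
}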
bool i915_sg_trim(struct sg_table *orig_st)
@@ -3000,61 +2876,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
- const struct i915_gem_context *ctx)
-{
- unsigned int score;
- unsigned long prev_hang;
-
- if (i915_gem_context_is_banned(ctx))
- score = I915_CLIENT_SCORE_CONTEXT_BAN;
- else
- score = 0;
-
- prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
- if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
- score += I915_CLIENT_SCORE_HANG_FAST;
-
- if (score) {
- atomic_add(score, &file_priv->ban_score);
-
- DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
- ctx->name, score,
- atomic_read(&file_priv->ban_score));
- }
-}
-
-static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
-{
- unsigned int score;
- bool banned, bannable;
-
- atomic_inc(&ctx->guilty_count);
-
- bannable = i915_gem_context_is_bannable(ctx);
- score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
- /* Cool contexts don't accumulate client ban score */
- if (!bannable)
- return;
-
- if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score);
- i915_gem_context_set_banned(ctx);
- }
-
- if (!IS_ERR_OR_NULL(ctx->file_priv))
- i915_gem_client_mark_guilty(ctx->file_priv, ctx);
-}
-
-static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
-{
- atomic_inc(&ctx->active_count);
-}
-
struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
@@ -3085,366 +2906,6 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
return active;
}
-/*
- * Ensure irq handler finishes, and not run again.
- * Also return the active request so that we only search for it once.
- */
-struct i915_request *
-i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
-{
- struct i915_request *request;
-
- /*
- * During the reset sequence, we must prevent the engine from
- * entering RC6. As the context state is undefined until we restart
- * the engine, if it does enter RC6 during the reset, the state
- * written to the powercontext is undefined and so we may lose
- * GPU state upon resume, i.e. fail to restart after a reset.
- */
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
-
- request = engine->reset.prepare(engine);
- if (request && request->fence.error == -EIO)
- request = ERR_PTR(-EIO); /* Previous reset failed! */
-
- return request;
-}
-
-int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- struct i915_request *request;
- enum intel_engine_id id;
- int err = 0;
-
- for_each_engine(engine, dev_priv, id) {
- request = i915_gem_reset_prepare_engine(engine);
- if (IS_ERR(request)) {
- err = PTR_ERR(request);
- continue;
- }
-
- engine->hangcheck.active_request = request;
- }
-
- i915_gem_revoke_fences(dev_priv);
- intel_uc_sanitize(dev_priv);
-
- return err;
-}
-
-static void engine_skip_context(struct i915_request *request)
-{
- struct intel_engine_cs *engine = request->engine;
- struct i915_gem_context *hung_ctx = request->gem_context;
- struct i915_timeline *timeline = request->timeline;
- unsigned long flags;
-
- GEM_BUG_ON(timeline == &engine->timeline);
-
- spin_lock_irqsave(&engine->timeline.lock, flags);
- spin_lock(&timeline->lock);
-
- list_for_each_entry_continue(request, &engine->timeline.requests, link)
- if (request->gem_context == hung_ctx)
- i915_request_skip(request, -EIO);
-
- list_for_each_entry(request, &timeline->requests, link)
- i915_request_skip(request, -EIO);
-
- spin_unlock(&timeline->lock);
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-}
-
-/* Returns the request if it was guilty of the hang */
-static struct i915_request *
-i915_gem_reset_request(struct intel_engine_cs *engine,
- struct i915_request *request,
- bool stalled)
-{
- /* The guilty request will get skipped on a hung engine.
- *
- * Users of client default contexts do not rely on logical
- * state preserved between batches so it is safe to execute
- * queued requests following the hang. Non default contexts
- * rely on preserved state, so skipping a batch loses the
- * evolution of the state and it needs to be considered corrupted.
- * Executing more queued batches on top of corrupted state is
- * risky. But we take the risk by trying to advance through
- * the queued requests in order to make the client behaviour
- * more predictable around resets, by not throwing away random
- * amount of batches it has prepared for execution. Sophisticated
- * clients can use gem_reset_stats_ioctl and dma fence status
- * (exported via sync_file info ioctl on explicit fences) to observe
- * when it loses the context state and should rebuild accordingly.
- *
- * The context ban, and ultimately the client ban, mechanism are safety
- * valves if client submission ends up resulting in nothing more than
- * subsequent hangs.
- */
-
- if (i915_request_completed(request)) {
- GEM_TRACE("%s pardoned global=%d (fence %llx:%lld), current %d\n",
- engine->name, request->global_seqno,
- request->fence.context, request->fence.seqno,
- intel_engine_get_seqno(engine));
- stalled = false;
- }
-
- if (stalled) {
- i915_gem_context_mark_guilty(request->gem_context);
- i915_request_skip(request, -EIO);
-
- /* If this context is now banned, skip all pending requests. */
- if (i915_gem_context_is_banned(request->gem_context))
- engine_skip_context(request);
- } else {
- /*
- * Since this is not the hung engine, it may have advanced
- * since the hang declaration. Double check by refinding
- * the active request at the time of the reset.
- */
- request = i915_gem_find_active_request(engine);
- if (request) {
- unsigned long flags;
-
- i915_gem_context_mark_innocent(request->gem_context);
- dma_fence_set_error(&request->fence, -EAGAIN);
-
- /* Rewind the engine to replay the incomplete rq */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- request = list_prev_entry(request, link);
- if (&request->link == &engine->timeline.requests)
- request = NULL;
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
- }
- }
-
- return request;
-}
-
-void i915_gem_reset_engine(struct intel_engine_cs *engine,
- struct i915_request *request,
- bool stalled)
-{
- /*
- * Make sure this write is visible before we re-enable the interrupt
- * handlers on another CPU, as tasklet_enable() resolves to just
- * a compiler barrier which is insufficient for our purpose here.
- */
- smp_store_mb(engine->irq_posted, 0);
-
- if (request)
- request = i915_gem_reset_request(engine, request, stalled);
-
- /* Setup the CS to resume from the breadcrumb of the hung request */
- engine->reset.reset(engine, request);
-}
-
-void i915_gem_reset(struct drm_i915_private *dev_priv,
- unsigned int stalled_mask)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- i915_retire_requests(dev_priv);
-
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce;
-
- i915_gem_reset_engine(engine,
- engine->hangcheck.active_request,
- stalled_mask & ENGINE_MASK(id));
- ce = fetch_and_zero(&engine->last_retired_context);
- if (ce)
- intel_context_unpin(ce);
-
- /*
- * Ostensibily, we always want a context loaded for powersaving,
- * so if the engine is idle after the reset, send a request
- * to load our scratch kernel_context.
- *
- * More mysteriously, if we leave the engine idle after a reset,
- * the next userspace batch may hang, with what appears to be
- * an incoherent read by the CS (presumably stale TLB). An
- * empty request appears sufficient to paper over the glitch.
- */
- if (intel_engine_is_idle(engine)) {
- struct i915_request *rq;
-
- rq = i915_request_alloc(engine,
- dev_priv->kernel_context);
- if (!IS_ERR(rq))
- i915_request_add(rq);
- }
- }
-
- i915_gem_restore_fences(dev_priv);
-}
-
-void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
-{
- engine->reset.finish(engine);
-
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
-}
-
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- for_each_engine(engine, dev_priv, id) {
- engine->hangcheck.active_request = NULL;
- i915_gem_reset_finish_engine(engine);
- }
-}
-
-static void nop_submit_request(struct i915_request *request)
-{
- unsigned long flags;
-
- GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
- request->engine->name,
- request->fence.context, request->fence.seqno);
- dma_fence_set_error(&request->fence, -EIO);
-
- spin_lock_irqsave(&request->engine->timeline.lock, flags);
- __i915_request_submit(request);
- intel_engine_init_global_seqno(request->engine, request->global_seqno);
- spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
-}
-
-void i915_gem_set_wedged(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- GEM_TRACE("start\n");
-
- if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- for_each_engine(engine, i915, id)
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
-
- if (test_and_set_bit(I915_WEDGED, &i915->gpu_error.flags))
- goto out;
-
- /*
- * First, stop submission to hw, but do not yet complete requests by
- * rolling the global seqno forward (since this would complete requests
- * for which we haven't set the fence error to EIO yet).
- */
- for_each_engine(engine, i915, id)
- i915_gem_reset_prepare_engine(engine);
-
- /* Even if the GPU reset fails, it should still stop the engines */
- if (INTEL_GEN(i915) >= 5)
- intel_gpu_reset(i915, ALL_ENGINES);
-
- for_each_engine(engine, i915, id) {
- engine->submit_request = nop_submit_request;
- engine->schedule = NULL;
- }
- i915->caps.scheduler = 0;
-
- /*
- * Make sure no request can slip through without getting completed by
- * either this call here to intel_engine_init_global_seqno, or the one
- * in nop_submit_request.
- */
- synchronize_rcu();
-
- /* Mark all executing requests as skipped */
- for_each_engine(engine, i915, id)
- engine->cancel_requests(engine);
-
- for_each_engine(engine, i915, id) {
- i915_gem_reset_finish_engine(engine);
- intel_engine_wakeup(engine);
- }
-
-out:
- GEM_TRACE("end\n");
-
- wake_up_all(&i915->gpu_error.reset_queue);
-}
-
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
-{
- struct i915_timeline *tl;
-
- lockdep_assert_held(&i915->drm.struct_mutex);
- if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
- return true;
-
- GEM_TRACE("start\n");
-
- /*
- * Before unwedging, make sure that all pending operations
- * are flushed and errored out - we may have requests waiting upon
- * third party fences. We marked all inflight requests as EIO, and
- * every execbuf since returned EIO, for consistency we want all
- * the currently pending requests to also be marked as EIO, which
- * is done inside our nop_submit_request - and so we must wait.
- *
- * No more can be submitted until we reset the wedged bit.
- */
- list_for_each_entry(tl, &i915->gt.timelines, link) {
- struct i915_request *rq;
-
- rq = i915_gem_active_peek(&tl->last_request,
- &i915->drm.struct_mutex);
- if (!rq)
- continue;
-
- /*
- * We can't use our normal waiter as we want to
- * avoid recursively trying to handle the current
- * reset. The basic dma_fence_default_wait() installs
- * a callback for dma_fence_signal(), which is
- * triggered by our nop handler (indirectly, the
- * callback enables the signaler thread which is
- * woken by the nop_submit_request() advancing the seqno
- * and when the seqno passes the fence, the signaler
- * then signals the fence waking us up).
- */
- if (dma_fence_default_wait(&rq->fence, true,
- MAX_SCHEDULE_TIMEOUT) < 0)
- return false;
- }
- i915_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
-
- if (!intel_gpu_reset(i915, ALL_ENGINES))
- intel_engines_sanitize(i915);
-
- /*
- * Undo nop_submit_request. We prevent all new i915 requests from
- * being queued (by disallowing execbuf whilst wedged) so having
- * waited for all active requests above, we know the system is idle
- * and do not have to worry about a thread being inside
- * engine->submit_request() as we swap over. So unlike installing
- * the nop_submit_request on reset, we can do this from normal
- * context and do not require stop_machine().
- */
- intel_engines_reset_default_submission(i915);
- i915_gem_contexts_lost(i915);
-
- GEM_TRACE("end\n");
-
- smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
- clear_bit(I915_WEDGED, &i915->gpu_error.flags);
-
- return true;
-}
-
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
@@ -4856,8 +4317,9 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
struct llist_node *freed)
{
struct drm_i915_gem_object *obj, *on;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
llist_for_each_entry_safe(obj, on, freed, freed) {
struct i915_vma *vma, *vn;
@@ -4918,7 +4380,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
if (on)
cond_resched();
}
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
@@ -5027,13 +4489,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
void i915_gem_sanitize(struct drm_i915_private *i915)
{
- int err;
+ intel_wakeref_t wakeref;
GEM_TRACE("\n");
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
/*
@@ -5053,14 +4515,10 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* it may impact the display and we are uncertain about the stability
* of the reset, so this could be applied to even earlier gen.
*/
- err = -ENODEV;
- if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
- err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
- if (!err)
- intel_engines_sanitize(i915);
+ intel_engines_sanitize(i915, false);
intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
i915_gem_contexts_lost(i915);
mutex_unlock(&i915->drm.struct_mutex);
@@ -5068,11 +4526,12 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
int i915_gem_suspend(struct drm_i915_private *i915)
{
+ intel_wakeref_t wakeref;
int ret;
GEM_TRACE("\n");
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
intel_suspend_gt_powersave(i915);
mutex_lock(&i915->drm.struct_mutex);
@@ -5124,12 +4583,12 @@ int i915_gem_suspend(struct drm_i915_private *i915)
if (WARN_ON(!intel_engines_are_idle(i915)))
i915_gem_set_wedged(i915); /* no hope, discard everything */
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return 0;
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return ret;
}
@@ -5223,15 +4682,15 @@ void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_TILE_SURFACE_SWIZZLING);
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
return;
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
- else if (IS_GEN8(dev_priv))
+ else if (IS_GEN(dev_priv, 8))
I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
BUG();
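IS_GEN() now takes the generation as an argument instead of baking it into the macro name. Its definition is in the driver headers rather than in this patch; the presumed shape is a test against a per-generation bitmask, roughly:

/* Presumed shape of the reworked check; illustrative only. */
#define EXAMPLE_IS_GEN(dev_priv, n) \
	(!!(INTEL_INFO(dev_priv)->gen_mask & BIT((n) - 1)))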
@@ -5253,10 +4712,10 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
init_unused_ring(dev_priv, SRB1_BASE);
init_unused_ring(dev_priv, SRB2_BASE);
init_unused_ring(dev_priv, SRB3_BASE);
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
init_unused_ring(dev_priv, SRB0_BASE);
init_unused_ring(dev_priv, SRB1_BASE);
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
init_unused_ring(dev_priv, PRB1_BASE);
init_unused_ring(dev_priv, PRB2_BASE);
}
@@ -5580,7 +5039,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
}
ret = i915_gem_init_scratch(dev_priv,
- IS_GEN2(dev_priv) ? SZ_256K : PAGE_SIZE);
+ IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
if (ret) {
GEM_BUG_ON(ret == -EIO);
goto err_ggtt;
@@ -5840,6 +5299,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_idle_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
+ mutex_init(&dev_priv->gpu_error.wedge_mutex);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
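The new wedge_mutex sits in gpu_error next to the reset wait queues; since the reset and wedging code now lives in i915_reset.c (per the diffstat), it presumably serialises transitions in and out of the wedged state. A hedged sketch of that assumed use:

static void example_wedge(struct drm_i915_private *i915)
{
	/* Assumed use of the new lock; the real callers are in i915_reset.c. */
	mutex_lock(&i915->gpu_error.wedge_mutex);
	/* ... mark the device wedged, or attempt recovery ... */
	mutex_unlock(&i915->gpu_error.wedge_mutex);
}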
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 4ec386950f75..fae68c4c4683 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -86,7 +86,6 @@
*/
#include <linux/log2.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -311,7 +310,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
- if (IS_GEN8(i915))
+ if (IS_GEN(i915, 8))
desc |= GEN8_CTX_L3LLC_COHERENT;
/* TODO: WaDisableLiteRestore when we start using semaphore
@@ -339,11 +338,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
-
- ce->gem_context = ctx;
- }
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
+ intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index f6d870b1f73e..47d82ce7ba6a 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -364,4 +364,12 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
+static inline void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ ce->gem_context = ctx;
+}
+
#endif /* !__I915_GEM_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 82e2ca17a441..02f7298bfe57 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,7 +27,6 @@
#include <linux/dma-buf.h>
#include <linux/reservation.h>
-#include <drm/drmP.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 02b83a5ed96c..f6855401f247 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,7 +26,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 485b259127c3..f250109e1f66 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -31,7 +31,6 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
-#include <drm/drmP.h>
#include <drm/drm_syncobj.h>
#include <drm/i915_drm.h>
@@ -1380,7 +1379,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* batchbuffers.
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
- IS_GEN6(eb->i915)) {
+ IS_GEN(eb->i915, 6)) {
err = i915_vma_bind(target, target->obj->cache_level,
PIN_GLOBAL);
if (WARN_ONCE(err,
@@ -1896,7 +1895,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN7(rq->i915) || rq->engine->id != RCS) {
+ if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2203,6 +2202,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
+ intel_wakeref_t wakeref;
int out_fence_fd = -1;
int err;
@@ -2273,7 +2273,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* wakeref that we hold until the GPU has been idle for at least
* 100ms.
*/
- intel_runtime_pm_get(eb.i915);
+ wakeref = intel_runtime_pm_get(eb.i915);
err = i915_mutex_lock_interruptible(dev);
if (err)
@@ -2425,7 +2425,7 @@ err_vma:
eb_release_vmas(&eb);
mutex_unlock(&dev->struct_mutex);
err_rpm:
- intel_runtime_pm_put(eb.i915);
+ intel_runtime_pm_put(eb.i915, wakeref);
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d548ac05ccd7..46e259661294 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,7 +21,6 @@
* IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -193,9 +192,9 @@ static void fence_write(struct drm_i915_fence_reg *fence,
* and explicitly managed for internal users.
*/
- if (IS_GEN2(fence->i915))
+ if (IS_GEN(fence->i915, 2))
i830_write_fence_reg(fence, vma);
- else if (IS_GEN3(fence->i915))
+ else if (IS_GEN(fence->i915, 3))
i915_write_fence_reg(fence, vma);
else
i965_write_fence_reg(fence, vma);
@@ -210,6 +209,7 @@ static void fence_write(struct drm_i915_fence_reg *fence,
static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
+ intel_wakeref_t wakeref;
int ret;
if (vma) {
@@ -257,9 +257,10 @@ static int fence_update(struct drm_i915_fence_reg *fence,
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
*/
- if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
+ if (wakeref) {
fence_write(fence, vma);
- intel_runtime_pm_put(fence->i915);
+ intel_runtime_pm_put(fence->i915, wakeref);
}
if (vma) {
@@ -554,8 +555,8 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
void
i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
- uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+ u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
/*
@@ -578,7 +579,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
} else {
- uint32_t dimm_c0, dimm_c1;
+ u32 dimm_c0, dimm_c1;
dimm_c0 = I915_READ(MAD_DIMM_C0);
dimm_c1 = I915_READ(MAD_DIMM_C1);
dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
@@ -596,13 +597,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
}
}
- } else if (IS_GEN5(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 5)) {
/* On Ironlake whatever DRAM config, GPU always do
* same swizzling setup.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
@@ -610,7 +611,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if (IS_MOBILE(dev_priv) ||
IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
- uint32_t dcc;
+ u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
@@ -647,7 +648,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
}
/* check for L-shaped memory aka modified enhanced addressing */
- if (IS_GEN4(dev_priv) &&
+ if (IS_GEN(dev_priv, 4) &&
!(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index add1fe7aeb93..9081e3bc5a59 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -33,11 +33,11 @@
#include <asm/set_memory.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
+#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -474,8 +474,7 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
spin_unlock(&vm->free_pages.lock);
}
-static void i915_address_space_init(struct i915_address_space *vm,
- struct drm_i915_private *dev_priv)
+static void i915_address_space_init(struct i915_address_space *vm, int subclass)
{
/*
* The vm->mutex must be reclaim safe (for use in the shrinker).
@@ -483,7 +482,8 @@ static void i915_address_space_init(struct i915_address_space *vm,
* attempt holding the lock is immediately reported by lockdep.
*/
mutex_init(&vm->mutex);
- i915_gem_shrinker_taints_mutex(&vm->mutex);
+ lockdep_set_subclass(&vm->mutex, subclass);
+ i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
GEM_BUG_ON(!vm->total);
drm_mm_init(&vm->mm, 0, vm->total);
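Giving GGTT and ppGTT address spaces distinct lockdep subclasses presumably lets a vm->mutex of one class be taken while one of the other class is already held, without lockdep flagging it as recursion on a single lock class. A minimal sketch of the mechanism, with illustrative names:

struct example_vm {
	struct mutex mutex;
};

static void example_init_vm(struct example_vm *vm, int subclass)
{
	mutex_init(&vm->mutex);
	/* Distinguish lock classes so nesting GGTT vs ppGTT mutexes is legal. */
	lockdep_set_subclass(&vm->mutex, subclass);
}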
@@ -1423,8 +1423,6 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
gen8_initialize_pd(vm, pd);
gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
-
- mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
}
ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
@@ -1490,84 +1488,6 @@ unwind:
return -ENOMEM;
}
-static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
- u64 start, u64 length,
- gen8_pte_t scratch_pte,
- struct seq_file *m)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- struct i915_page_directory *pd;
- u32 pdpe;
-
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- struct i915_page_table *pt;
- u64 pd_len = length;
- u64 pd_start = start;
- u32 pde;
-
- if (pdp->page_directory[pdpe] == ppgtt->vm.scratch_pd)
- continue;
-
- seq_printf(m, "\tPDPE #%d\n", pdpe);
- gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
- u32 pte;
- gen8_pte_t *pt_vaddr;
-
- if (pd->page_table[pde] == ppgtt->vm.scratch_pt)
- continue;
-
- pt_vaddr = kmap_atomic_px(pt);
- for (pte = 0; pte < GEN8_PTES; pte += 4) {
- u64 va = (pdpe << GEN8_PDPE_SHIFT |
- pde << GEN8_PDE_SHIFT |
- pte << GEN8_PTE_SHIFT);
- int i;
- bool found = false;
-
- for (i = 0; i < 4; i++)
- if (pt_vaddr[pte + i] != scratch_pte)
- found = true;
- if (!found)
- continue;
-
- seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
- for (i = 0; i < 4; i++) {
- if (pt_vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %llx", pt_vaddr[pte + i]);
- else
- seq_puts(m, " SCRATCH ");
- }
- seq_puts(m, "\n");
- }
- kunmap_atomic(pt_vaddr);
- }
- }
-}
-
-static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
-{
- struct i915_address_space *vm = &ppgtt->vm;
- const gen8_pte_t scratch_pte = vm->scratch_pte;
- u64 start = 0, length = ppgtt->vm.total;
-
- if (use_4lvl(vm)) {
- u64 pml4e;
- struct i915_pml4 *pml4 = &ppgtt->pml4;
- struct i915_page_directory_pointer *pdp;
-
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (pml4->pdps[pml4e] == ppgtt->vm.scratch_pdp)
- continue;
-
- seq_printf(m, " PML4E #%llu\n", pml4e);
- gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
- }
- } else {
- gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
- }
-}
-
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
struct i915_address_space *vm = &ppgtt->vm;
@@ -1628,7 +1548,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
/* From bdw, there is support for read-only pages in the PPGTT. */
ppgtt->vm.has_read_only = true;
- i915_address_space_init(&ppgtt->vm, i915);
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
@@ -1672,7 +1592,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
gen8_ppgtt_notify_vgt(ppgtt, true);
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->debug_dump = gen8_dump_ppgtt;
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
@@ -1688,60 +1607,6 @@ err_free:
return ERR_PTR(err);
}
-static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
-{
- struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
- const gen6_pte_t scratch_pte = base->vm.scratch_pte;
- struct i915_page_table *pt;
- u32 pte, pde;
-
- gen6_for_all_pdes(pt, &base->pd, pde) {
- gen6_pte_t *vaddr;
-
- if (pt == base->vm.scratch_pt)
- continue;
-
- if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
- u32 expected =
- GEN6_PDE_ADDR_ENCODE(px_dma(pt)) |
- GEN6_PDE_VALID;
- u32 pd_entry = readl(ppgtt->pd_addr + pde);
-
- if (pd_entry != expected)
- seq_printf(m,
- "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
- pde,
- pd_entry,
- expected);
-
- seq_printf(m, "\tPDE: %x\n", pd_entry);
- }
-
- vaddr = kmap_atomic_px(base->pd.page_table[pde]);
- for (pte = 0; pte < GEN6_PTES; pte += 4) {
- int i;
-
- for (i = 0; i < 4; i++)
- if (vaddr[pte + i] != scratch_pte)
- break;
- if (i == 4)
- continue;
-
- seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
- pde, pte,
- (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
- for (i = 0; i < 4; i++) {
- if (vaddr[pte + i] != scratch_pte)
- seq_printf(m, " %08x", vaddr[pte + i]);
- else
- seq_puts(m, " SCRATCH");
- }
- seq_puts(m, "\n");
- }
- kunmap_atomic(vaddr);
- }
-}
-
/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
const unsigned int pde,
@@ -2075,6 +1940,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ int err;
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
@@ -2090,9 +1956,17 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- return i915_vma_pin(ppgtt->vma,
- 0, GEN6_PD_ALIGN,
- PIN_GLOBAL | PIN_HIGH);
+ err = i915_vma_pin(ppgtt->vma,
+ 0, GEN6_PD_ALIGN,
+ PIN_GLOBAL | PIN_HIGH);
+ if (err)
+ goto unpin;
+
+ return 0;
+
+unpin:
+ ppgtt->pin_count = 0;
+ return err;
}
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
@@ -2123,13 +1997,12 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
- i915_address_space_init(&ppgtt->base.vm, i915);
+ i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.debug_dump = gen6_dump_ppgtt;
ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
@@ -2195,9 +2068,9 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
gen6_ppgtt_enable(dev_priv);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
gen7_ppgtt_enable(dev_priv);
return 0;
@@ -2279,7 +2152,7 @@ static bool needs_idle_maps(struct drm_i915_private *dev_priv)
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
- return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
+ return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}
static void gen6_check_faults(struct drm_i915_private *dev_priv)
@@ -2372,7 +2245,8 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
DMA_ATTR_NO_WARN))
return 0;
- /* If the DMA remap fails, one cause can be that we have
+ /*
+ * If the DMA remap fails, one cause can be that we have
* too many objects pinned in a small remapping table,
* such as swiotlb. Incrementally purge all other objects and
* try again - if there are no more pages to remove from
@@ -2382,8 +2256,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
} while (i915_gem_shrink(to_i915(obj->base.dev),
obj->base.size >> PAGE_SHIFT, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE));
+ I915_SHRINK_UNBOUND));
return -ENOSPC;
}
@@ -2655,6 +2528,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
{
struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
+ intel_wakeref_t wakeref;
u32 pte_flags;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
@@ -2662,9 +2536,8 @@ static int ggtt_bind_vma(struct i915_vma *vma,
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
- intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
@@ -2681,10 +2554,10 @@ static int ggtt_bind_vma(struct i915_vma *vma,
static void ggtt_unbind_vma(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(i915);
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
@@ -2716,9 +2589,12 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
}
if (flags & I915_VMA_GLOBAL_BIND) {
- intel_runtime_pm_get(i915);
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref) {
+ vma->vm->insert_entries(vma->vm, vma,
+ cache_level, pte_flags);
+ }
}
return 0;
@@ -2729,9 +2605,11 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
struct drm_i915_private *i915 = vma->vm->i915;
if (vma->flags & I915_VMA_GLOBAL_BIND) {
- intel_runtime_pm_get(i915);
- vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
- intel_runtime_pm_put(i915);
+ struct i915_address_space *vm = vma->vm;
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref)
+ vm->clear_range(vm, vma->node.start, vma->size);
}
if (vma->flags & I915_VMA_LOCAL_BIND) {
@@ -3355,7 +3233,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
- if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+ if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
if (ggtt->vm.clear_range != nop_clear_range)
@@ -3556,7 +3435,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
* and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- i915_address_space_init(&ggtt->vm, dev_priv);
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->vm.is_ggtt = true;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4874da09a3c4..9229b03d629b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -288,6 +288,8 @@ struct i915_address_space {
bool closed;
struct mutex mutex; /* protects vma and our lists */
+#define VM_CLASS_GGTT 0
+#define VM_CLASS_PPGTT 1
u64 scratch_pte;
struct i915_page_dma scratch_page;
@@ -413,8 +415,6 @@ struct i915_hw_ppgtt {
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
-
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};
struct gen6_hw_ppgtt {
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 0d0144b2104c..fddde1033e74 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -22,7 +22,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index a6dd7c46de0d..cb1b0144d274 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -29,7 +29,8 @@
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
-#include <drm/drmP.h>
+#include <drm/drm_file.h>
+#include <drm/drm_device.h>
#include <drm/i915_drm.h>
@@ -56,6 +57,7 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1)
#define I915_GEM_OBJECT_IS_PROXY BIT(2)
+#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(3)
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
@@ -387,6 +389,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
+{
+ return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
+}
+
+static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
return obj->active_count;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index ea90d3a0d511..8ceecb026910 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -30,30 +30,27 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
-static bool shrinker_lock(struct drm_i915_private *i915, bool *unlock)
+static bool shrinker_lock(struct drm_i915_private *i915,
+ unsigned int flags,
+ bool *unlock)
{
- switch (mutex_trylock_recursive(&i915->drm.struct_mutex)) {
+ struct mutex *m = &i915->drm.struct_mutex;
+
+ switch (mutex_trylock_recursive(m)) {
case MUTEX_TRYLOCK_RECURSIVE:
*unlock = false;
return true;
case MUTEX_TRYLOCK_FAILED:
*unlock = false;
- preempt_disable();
- do {
- cpu_relax();
- if (mutex_trylock(&i915->drm.struct_mutex)) {
- *unlock = true;
- break;
- }
- } while (!need_resched());
- preempt_enable();
+ if (flags & I915_SHRINK_ACTIVE &&
+ mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
+ *unlock = true;
return *unlock;
case MUTEX_TRYLOCK_SUCCESS:
@@ -156,11 +153,12 @@ i915_gem_shrink(struct drm_i915_private *i915,
{ &i915->mm.bound_list, I915_SHRINK_BOUND },
{ NULL, 0 },
}, *phase;
+ intel_wakeref_t wakeref = 0;
unsigned long count = 0;
unsigned long scanned = 0;
bool unlock;
- if (!shrinker_lock(i915, &unlock))
+ if (!shrinker_lock(i915, flags, &unlock))
return 0;
/*
@@ -185,9 +183,11 @@ i915_gem_shrink(struct drm_i915_private *i915,
* device just to recover a little memory. If absolutely necessary,
* we will force the wake during oom-notifier.
*/
- if ((flags & I915_SHRINK_BOUND) &&
- !intel_runtime_pm_get_if_in_use(i915))
- flags &= ~I915_SHRINK_BOUND;
+ if (flags & I915_SHRINK_BOUND) {
+ wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (!wakeref)
+ flags &= ~I915_SHRINK_BOUND;
+ }
/*
* As we may completely rewrite the (un)bound list whilst unbinding
@@ -268,7 +268,7 @@ i915_gem_shrink(struct drm_i915_private *i915,
}
if (flags & I915_SHRINK_BOUND)
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
i915_retire_requests(i915);
@@ -295,14 +295,15 @@ i915_gem_shrink(struct drm_i915_private *i915,
*/
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
- unsigned long freed;
-
- intel_runtime_pm_get(i915);
- freed = i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+ unsigned long freed = 0;
+
+ with_intel_runtime_pm(i915, wakeref) {
+ freed = i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE);
+ }
return freed;
}
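Elsewhere in the patch the acquire is conditional, so the cookie doubles as a "was it taken?" flag: intel_runtime_pm_get_if_in_use() returns a zero cookie when the device is already asleep, and the put is skipped in that case. A short sketch of that sentinel pattern (function name invented for illustration):

static void example_opportunistic_flush(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	/* Returns 0 if the device is suspended; no wakeup is forced. */
	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (!wakeref)
		return; /* nothing to flush while asleep */

	/* ... flush hardware state ... */

	intel_runtime_pm_put(i915, wakeref);
}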
@@ -357,7 +358,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_scanned = 0;
- if (!shrinker_lock(i915, &unlock))
+ if (!shrinker_lock(i915, 0, &unlock))
return SHRINK_STOP;
freed = i915_gem_shrink(i915,
@@ -373,14 +374,16 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
- intel_runtime_pm_get(i915);
- freed += i915_gem_shrink(i915,
- sc->nr_to_scan - sc->nr_scanned,
- &sc->nr_scanned,
- I915_SHRINK_ACTIVE |
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref) {
+ freed += i915_gem_shrink(i915,
+ sc->nr_to_scan - sc->nr_scanned,
+ &sc->nr_scanned,
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
+ }
}
shrinker_unlock(i915, unlock);
@@ -388,31 +391,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
return sc->nr_scanned ? freed : SHRINK_STOP;
}
-static bool
-shrinker_lock_uninterruptible(struct drm_i915_private *i915, bool *unlock,
- int timeout_ms)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
-
- do {
- if (i915_gem_wait_for_idle(i915,
- 0, MAX_SCHEDULE_TIMEOUT) == 0 &&
- shrinker_lock(i915, unlock))
- break;
-
- schedule_timeout_killable(1);
- if (fatal_signal_pending(current))
- return false;
-
- if (time_after(jiffies, timeout)) {
- pr_err("Unable to lock GPU to purge memory.\n");
- return false;
- }
- } while (1);
-
- return true;
-}
-
static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
@@ -420,8 +398,13 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
container_of(nb, struct drm_i915_private, mm.oom_notifier);
struct drm_i915_gem_object *obj;
unsigned long unevictable, bound, unbound, freed_pages;
+ intel_wakeref_t wakeref;
- freed_pages = i915_gem_shrink_all(i915);
+ freed_pages = 0;
+ with_intel_runtime_pm(i915, wakeref)
+ freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
@@ -447,10 +430,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
pr_info("Purging GPU memory, %lu pages freed, "
"%lu pages still pinned.\n",
freed_pages, unevictable);
- if (unbound || bound)
- pr_err("%lu and %lu pages still available in the "
- "bound and unbound GPU page lists.\n",
- bound, unbound);
*(unsigned long *)ptr += freed_pages;
return NOTIFY_DONE;
@@ -463,26 +442,23 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
struct i915_vma *vma, *next;
unsigned long freed_pages = 0;
+ intel_wakeref_t wakeref;
bool unlock;
- int ret;
- if (!shrinker_lock_uninterruptible(i915, &unlock, 5000))
+ if (!shrinker_lock(i915, 0, &unlock))
return NOTIFY_DONE;
/* Force everything onto the inactive lists */
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
+ if (i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT))
goto out;
- intel_runtime_pm_get(i915);
- freed_pages += i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE |
- I915_SHRINK_VMAPS);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ freed_pages += i915_gem_shrink(i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_VMAPS);
/* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next,
@@ -533,13 +509,40 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
unregister_shrinker(&i915->mm.shrinker);
}
-void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
+ struct mutex *mutex)
{
+ bool unlock = false;
+
if (!IS_ENABLED(CONFIG_LOCKDEP))
return;
+ if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
+ mutex_acquire(&i915->drm.struct_mutex.dep_map,
+ I915_MM_NORMAL, 0, _RET_IP_);
+ unlock = true;
+ }
+
fs_reclaim_acquire(GFP_KERNEL);
- mutex_lock(mutex);
- mutex_unlock(mutex);
+
+ /*
+ * As we invariably rely on the struct_mutex within the shrinker,
+ * but have a complicated recursion dance, taint all the mutexes used
+ * within the shrinker with the struct_mutex. For completeness, we
+ * taint with all subclasses of struct_mutex, even though we should
+ * only need tainting by I915_MM_NORMAL to catch possible ABBA
+ * deadlocks from using struct_mutex inside @mutex.
+ */
+ mutex_acquire(&i915->drm.struct_mutex.dep_map,
+ I915_MM_SHRINKER, 0, _RET_IP_);
+
+ mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
+ mutex_release(&mutex->dep_map, 0, _RET_IP_);
+
+ mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
+
fs_reclaim_release(GFP_KERNEL);
+
+ if (unlock)
+ mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}
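The reworked i915_gem_shrinker_taints_mutex() now records the lock ordering purely through lockdep annotations (mutex_acquire()/mutex_release()) inside an fs_reclaim section, instead of really locking and unlocking the mutex. A hedged sketch of the underlying pattern, reduced to a single caller-supplied mutex:

/* Sketch: tell lockdep "this mutex may be taken under memory reclaim"
 * without ever contending on it at runtime.
 */
static void taint_mutex_for_reclaim(struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);		/* pretend we are inside reclaim */

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);	/* fake lock ... */
	mutex_release(&mutex->dep_map, 0, _RET_IP_);	/* ... fake unlock */

	fs_reclaim_release(GFP_KERNEL);
}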
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index f29a7ff7c362..9df615eea2d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -26,7 +26,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -102,7 +101,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
resource_size_t ggtt_start;
ggtt_start = I915_READ(PGTBL_CTL);
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
(ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
@@ -156,7 +155,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv,
* GEN3 firmware likes to smash pci bridges into the stolen
* range. Apparently this works.
*/
- if (r == NULL && !IS_GEN3(dev_priv)) {
+ if (r == NULL && !IS_GEN(dev_priv, 3)) {
DRM_ERROR("conflict detected with stolen region: %pR\n",
dsm);
@@ -194,7 +193,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);
+ WARN(IS_GEN(dev_priv, 5), "ILK stolen reserved found? 0x%08x\n",
+ reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
return;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index d9dc9df523b5..16cc9ddbce34 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -27,7 +27,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -87,7 +86,7 @@ u32 i915_gem_fence_size(struct drm_i915_private *i915,
}
/* Previous chips need a power-of-two fence region when tiling */
- if (IS_GEN3(i915))
+ if (IS_GEN(i915, 3))
ggtt_size = 1024*1024;
else
ggtt_size = 512*1024;
@@ -162,7 +161,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
return false;
}
- if (IS_GEN2(i915) ||
+ if (IS_GEN(i915, 2) ||
(tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
tile_width = 128;
else
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 9558582c105e..1d3f9a31ad61 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -22,7 +22,6 @@
*
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -50,77 +49,67 @@ struct i915_mmu_notifier {
struct hlist_node node;
struct mmu_notifier mn;
struct rb_root_cached objects;
- struct workqueue_struct *wq;
+ struct i915_mm_struct *mm;
};
struct i915_mmu_object {
struct i915_mmu_notifier *mn;
struct drm_i915_gem_object *obj;
struct interval_tree_node it;
- struct list_head link;
- struct work_struct work;
- bool attached;
};
-static void cancel_userptr(struct work_struct *work)
+static void add_object(struct i915_mmu_object *mo)
{
- struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
- struct drm_i915_gem_object *obj = mo->obj;
- struct work_struct *active;
-
- /* Cancel any active worker and force us to re-evaluate gup */
- mutex_lock(&obj->mm.lock);
- active = fetch_and_zero(&obj->userptr.work);
- mutex_unlock(&obj->mm.lock);
- if (active)
- goto out;
-
- i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
-
- mutex_lock(&obj->base.dev->struct_mutex);
-
- /* We are inside a kthread context and can't be interrupted */
- if (i915_gem_object_unbind(obj) == 0)
- __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
- WARN_ONCE(i915_gem_object_has_pages(obj),
- "Failed to release pages: bind_count=%d, pages_pin_count=%d, pin_global=%d\n",
- obj->bind_count,
- atomic_read(&obj->mm.pages_pin_count),
- obj->pin_global);
-
- mutex_unlock(&obj->base.dev->struct_mutex);
-
-out:
- i915_gem_object_put(obj);
+ GEM_BUG_ON(!RB_EMPTY_NODE(&mo->it.rb));
+ interval_tree_insert(&mo->it, &mo->mn->objects);
}
-static void add_object(struct i915_mmu_object *mo)
+static void del_object(struct i915_mmu_object *mo)
{
- if (mo->attached)
+ if (RB_EMPTY_NODE(&mo->it.rb))
return;
- interval_tree_insert(&mo->it, &mo->mn->objects);
- mo->attached = true;
+ interval_tree_remove(&mo->it, &mo->mn->objects);
+ RB_CLEAR_NODE(&mo->it.rb);
}
-static void del_object(struct i915_mmu_object *mo)
+static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
{
- if (!mo->attached)
+ struct i915_mmu_object *mo = obj->userptr.mmu_object;
+
+ /*
+ * During mm_invalidate_range we need to cancel any userptr that
+ * overlaps the range being invalidated. Doing so requires the
+ * struct_mutex, and that risks recursion. In order to cause
+ * recursion, the user must alias the userptr address space with
+ * a GTT mmapping (possible with a MAP_FIXED) - then when we have
+ * to invalidate that mmapping, mm_invalidate_range is called with

+ * the userptr address *and* the struct_mutex held. To prevent that
+ * we set a flag under the i915_mmu_notifier spinlock to indicate
+ * whether this object is valid.
+ */
+ if (!mo)
return;
- interval_tree_remove(&mo->it, &mo->mn->objects);
- mo->attached = false;
+ spin_lock(&mo->mn->lock);
+ if (value)
+ add_object(mo);
+ else
+ del_object(mo);
+ spin_unlock(&mo->mn->lock);
}
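With the cancel worker removed, "active" tracking is reduced to inserting or removing the interval-tree node under the notifier spinlock, and RB_EMPTY_NODE() doubles as the old "attached" flag. A hedged sketch of a caller marking the object visible to the notifier while its pages are pinned; example_pin_user_pages() is a placeholder, not the driver's real get-pages path:

/* Illustrative caller: expose the object to invalidations only while
 * its backing pages are actually pinned.
 */
static int example_userptr_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	__i915_gem_userptr_set_active(obj, true);	/* insert into interval tree */

	err = example_pin_user_pages(obj);		/* placeholder for gup */
	if (err)
		__i915_gem_userptr_set_active(obj, false); /* undo on failure */

	return err;
}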
-static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
- const struct mmu_notifier_range *range)
+static int
+userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+ const struct mmu_notifier_range *range)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
- struct i915_mmu_object *mo;
struct interval_tree_node *it;
- LIST_HEAD(cancelled);
+ struct mutex *unlock = NULL;
unsigned long end;
+ int ret = 0;
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
return 0;
@@ -131,11 +120,15 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, range->start, end);
while (it) {
+ struct drm_i915_gem_object *obj;
+
if (!range->blockable) {
- spin_unlock(&mn->lock);
- return -EAGAIN;
+ ret = -EAGAIN;
+ break;
}
- /* The mmu_object is released late when destroying the
+
+ /*
+ * The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
* since our serialisation is via the spinlock and not
@@ -144,29 +137,65 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
* use-after-free we only acquire a reference on the
* object if it is not in the process of being destroyed.
*/
- mo = container_of(it, struct i915_mmu_object, it);
- if (kref_get_unless_zero(&mo->obj->base.refcount))
- queue_work(mn->wq, &mo->work);
+ obj = container_of(it, struct i915_mmu_object, it)->obj;
+ if (!kref_get_unless_zero(&obj->base.refcount)) {
+ it = interval_tree_iter_next(it, range->start, end);
+ continue;
+ }
+ spin_unlock(&mn->lock);
+
+ if (!unlock) {
+ unlock = &mn->mm->i915->drm.struct_mutex;
+
+ switch (mutex_trylock_recursive(unlock)) {
+ default:
+ case MUTEX_TRYLOCK_FAILED:
+ if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) {
+ i915_gem_object_put(obj);
+ return -EINTR;
+ }
+ /* fall through */
+ case MUTEX_TRYLOCK_SUCCESS:
+ break;
+
+ case MUTEX_TRYLOCK_RECURSIVE:
+ unlock = ERR_PTR(-EEXIST);
+ break;
+ }
+ }
+
+ ret = i915_gem_object_unbind(obj);
+ if (ret == 0)
+ ret = __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
+ i915_gem_object_put(obj);
+ if (ret)
+ goto unlock;
- list_add(&mo->link, &cancelled);
- it = interval_tree_iter_next(it, range->start, end);
+ spin_lock(&mn->lock);
+
+ /*
+ * As we do not (yet) protect the mmu from concurrent insertion
+ * over this range, there is no guarantee that this search will
+ * terminate given a pathological workload.
+ */
+ it = interval_tree_iter_first(&mn->objects, range->start, end);
}
- list_for_each_entry(mo, &cancelled, link)
- del_object(mo);
spin_unlock(&mn->lock);
- if (!list_empty(&cancelled))
- flush_workqueue(mn->wq);
+unlock:
+ if (!IS_ERR_OR_NULL(unlock))
+ mutex_unlock(unlock);
+
+ return ret;
- return 0;
}
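The invalidate path must take struct_mutex, yet it can be entered by a task that already holds it (a userptr aliasing a GTT mmap). mutex_trylock_recursive() distinguishes the three outcomes, and only a lock this call path actually acquired is released at the end. A compact sketch of that pattern with a single lock and a placeholder work function:

/* Sketch: remember whether *we* took the lock (unlock is a real pointer)
 * so that a recursive caller's lock is never dropped from underneath it.
 */
static int run_locked(struct mutex *lock, int (*fn)(void *), void *data)
{
	struct mutex *unlock = NULL;
	int ret;

	switch (mutex_trylock_recursive(lock)) {
	case MUTEX_TRYLOCK_FAILED:
		mutex_lock(lock);
		/* fall through */
	case MUTEX_TRYLOCK_SUCCESS:
		unlock = lock;			/* we own it, we release it */
		break;
	case MUTEX_TRYLOCK_RECURSIVE:
		unlock = ERR_PTR(-EEXIST);	/* already held by this task */
		break;
	}

	ret = fn(data);

	if (!IS_ERR_OR_NULL(unlock))
		mutex_unlock(unlock);

	return ret;
}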
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
- .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
+ .invalidate_range_start = userptr_mn_invalidate_range_start,
};
static struct i915_mmu_notifier *
-i915_mmu_notifier_create(struct mm_struct *mm)
+i915_mmu_notifier_create(struct i915_mm_struct *mm)
{
struct i915_mmu_notifier *mn;
@@ -177,13 +206,7 @@ i915_mmu_notifier_create(struct mm_struct *mm)
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT_CACHED;
- mn->wq = alloc_workqueue("i915-userptr-release",
- WQ_UNBOUND | WQ_MEM_RECLAIM,
- 0);
- if (mn->wq == NULL) {
- kfree(mn);
- return ERR_PTR(-ENOMEM);
- }
+ mn->mm = mm;
return mn;
}
@@ -193,16 +216,14 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
struct i915_mmu_object *mo;
- mo = obj->userptr.mmu_object;
- if (mo == NULL)
+ mo = fetch_and_zero(&obj->userptr.mmu_object);
+ if (!mo)
return;
spin_lock(&mo->mn->lock);
del_object(mo);
spin_unlock(&mo->mn->lock);
kfree(mo);
-
- obj->userptr.mmu_object = NULL;
}
static struct i915_mmu_notifier *
@@ -215,7 +236,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (mn)
return mn;
- mn = i915_mmu_notifier_create(mm->mm);
+ mn = i915_mmu_notifier_create(mm);
if (IS_ERR(mn))
err = PTR_ERR(mn);
@@ -238,10 +259,8 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
mutex_unlock(&mm->i915->mm_lock);
up_write(&mm->mm->mmap_sem);
- if (mn && !IS_ERR(mn)) {
- destroy_workqueue(mn->wq);
+ if (mn && !IS_ERR(mn))
kfree(mn);
- }
return err ? ERR_PTR(err) : mm->mn;
}
@@ -264,14 +283,14 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
return PTR_ERR(mn);
mo = kzalloc(sizeof(*mo), GFP_KERNEL);
- if (mo == NULL)
+ if (!mo)
return -ENOMEM;
mo->mn = mn;
mo->obj = obj;
mo->it.start = obj->userptr.ptr;
mo->it.last = obj->userptr.ptr + obj->base.size - 1;
- INIT_WORK(&mo->work, cancel_userptr);
+ RB_CLEAR_NODE(&mo->it.rb);
obj->userptr.mmu_object = mo;
return 0;
@@ -285,13 +304,17 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
return;
mmu_notifier_unregister(&mn->mn, mm);
- destroy_workqueue(mn->wq);
kfree(mn);
}
#else
static void
+__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, bool value)
+{
+}
+
+static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}
@@ -459,42 +482,6 @@ alloc_table:
return st;
}
-static int
-__i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
- bool value)
-{
- int ret = 0;
-
- /* During mm_invalidate_range we need to cancel any userptr that
- * overlaps the range being invalidated. Doing so requires the
- * struct_mutex, and that risks recursion. In order to cause
- * recursion, the user must alias the userptr address space with
- * a GTT mmapping (possible with a MAP_FIXED) - then when we have
- * to invalidate that mmaping, mm_invalidate_range is called with
- * the userptr address *and* the struct_mutex held. To prevent that
- * we set a flag under the i915_mmu_notifier spinlock to indicate
- * whether this object is valid.
- */
-#if defined(CONFIG_MMU_NOTIFIER)
- if (obj->userptr.mmu_object == NULL)
- return 0;
-
- spin_lock(&obj->userptr.mmu_object->mn->lock);
- /* In order to serialise get_pages with an outstanding
- * cancel_userptr, we must drop the struct_mutex and try again.
- */
- if (!value)
- del_object(obj->userptr.mmu_object);
- else if (!work_pending(&obj->userptr.mmu_object->work))
- add_object(obj->userptr.mmu_object);
- else
- ret = -EAGAIN;
- spin_unlock(&obj->userptr.mmu_object->mn->lock);
-#endif
-
- return ret;
-}
-
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
@@ -680,8 +667,11 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
struct sgt_iter sgt_iter;
struct page *page;
- BUG_ON(obj->userptr.work != NULL);
+ /* Cancel any inflight work and force them to restart their gup */
+ obj->userptr.work = NULL;
__i915_gem_userptr_set_active(obj, false);
+ if (!pages)
+ return;
if (obj->mm.madv != I915_MADV_WILLNEED)
obj->mm.dirty = false;
@@ -719,7 +709,8 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
- I915_GEM_OBJECT_IS_SHRINKABLE,
+ I915_GEM_OBJECT_IS_SHRINKABLE |
+ I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
.dmabuf_export = i915_gem_userptr_dmabuf_export,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 07465123c166..1f8e80e31b49 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -594,13 +594,14 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
const struct intel_device_info *info,
+ const struct intel_runtime_info *runtime,
const struct intel_driver_caps *caps)
{
struct drm_printer p = i915_error_printer(m);
intel_device_info_dump_flags(info, &p);
intel_driver_caps_print(caps, &p);
- intel_device_info_dump_topology(&info->sseu, &p);
+ intel_device_info_dump_topology(&runtime->sseu, &p);
}
static void err_print_params(struct drm_i915_error_state_buf *m,
@@ -664,7 +665,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (*error->error_msg)
err_printf(m, "%s\n", error->error_msg);
- err_printf(m, "Kernel: %s\n", init_utsname()->release);
+ err_printf(m, "Kernel: %s %s\n",
+ init_utsname()->release,
+ init_utsname()->machine);
ts = ktime_to_timespec64(error->time);
err_printf(m, "Time: %lld s %ld us\n",
(s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
@@ -735,7 +738,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
}
- if (IS_GEN7(m->i915))
+ if (IS_GEN(m->i915, 7))
err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -844,7 +847,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (error->display)
intel_display_print_error_state(m, error->display);
- err_print_capabilities(m, &error->device_info, &error->driver_caps);
+ err_print_capabilities(m, &error->device_info, &error->runtime_info,
+ &error->driver_caps);
err_print_params(m, &error->params);
err_print_uc(m, &error->uc);
}
@@ -963,17 +967,10 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
-static __always_inline void free_param(const char *type, void *x)
-{
- if (!__builtin_strcmp(type, "char *"))
- kfree(*(void **)x);
-}
static void cleanup_params(struct i915_gpu_state *error)
{
-#define FREE(T, x, ...) free_param(#T, &error->params.x);
- I915_PARAMS_FOR_EACH(FREE);
-#undef FREE
+ i915_params_free(&error->params);
}
static void cleanup_uc_state(struct i915_gpu_state *error)
@@ -1037,7 +1034,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dma_addr_t dma;
int ret;
- if (!vma)
+ if (!vma || !vma->pages)
return NULL;
num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
@@ -1085,7 +1082,7 @@ i915_error_object_create(struct drm_i915_private *i915,
/* The error capture is special as it tries to run underneath the normal
* locking rules - so we use the raw version of the i915_gem_active lookup.
*/
-static inline uint32_t
+static inline u32
__active_get_seqno(struct i915_gem_active *active)
{
struct i915_request *request;
@@ -1156,11 +1153,11 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
*
* It's only a small step better than a random number in its current form.
*/
-static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
- struct i915_gpu_state *error,
- int *engine_id)
+static u32 i915_error_generate_code(struct drm_i915_private *dev_priv,
+ struct i915_gpu_state *error,
+ int *engine_id)
{
- uint32_t error_code = 0;
+ u32 error_code = 0;
int i;
/* IPEHR would be an ideal way to detect errors, as it's the gross
@@ -1314,7 +1311,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
- if (IS_GEN7(dev_priv)) {
+ if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
default:
case RCS:
@@ -1330,7 +1327,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(engine->i915)) {
+ } else if (IS_GEN(engine->i915, 6)) {
mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
/* XXX: gen8 returns to sanity */
@@ -1352,10 +1349,10 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
ee->vm_info.pp_dir_base =
I915_READ(RING_PP_DIR_BASE(engine));
else if (INTEL_GEN(dev_priv) >= 8)
@@ -1725,7 +1722,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->forcewake = I915_READ_FW(FORCEWAKE_VLV);
}
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
error->err_int = I915_READ(GEN7_ERR_INT);
if (INTEL_GEN(dev_priv) >= 8) {
@@ -1733,7 +1730,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
}
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
error->forcewake = I915_READ_FW(FORCEWAKE);
error->gab_ctl = I915_READ(GAB_CTL);
error->gfx_mode = I915_READ(GFX_MODE);
@@ -1753,7 +1750,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->ccid = I915_READ(CCID);
/* 3: Feature specific registers */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 6, 7)) {
error->gam_ecochk = I915_READ(GAM_ECOCHK);
error->gac_eco = I915_READ(GAC_ECO_BITS);
}
@@ -1777,7 +1774,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
error->ier = I915_READ16(IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
error->ier = I915_READ(IER);
@@ -1831,21 +1828,15 @@ static void capture_gen_state(struct i915_gpu_state *error)
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
+ memcpy(&error->runtime_info,
+ RUNTIME_INFO(i915),
+ sizeof(error->runtime_info));
error->driver_caps = i915->caps;
}
-static __always_inline void dup_param(const char *type, void *x)
-{
- if (!__builtin_strcmp(type, "char *"))
- *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
-}
-
static void capture_params(struct i915_gpu_state *error)
{
- error->params = i915_modparams;
-#define DUP(T, x, ...) dup_param(#T, &error->params.x);
- I915_PARAMS_FOR_EACH(DUP);
-#undef DUP
+ i915_params_copy(&error->params, &i915_modparams);
}
static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
@@ -1907,9 +1898,16 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
{
struct i915_gpu_state *error;
+ /* Check if GPU capture has been disabled */
+ error = READ_ONCE(i915->gpu_error.first_error);
+ if (IS_ERR(error))
+ return error;
+
error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (!error)
- return NULL;
+ if (!error) {
+ i915_disable_error_state(i915, -ENOMEM);
+ return ERR_PTR(-ENOMEM);
+ }
kref_init(&error->ref);
error->i915 = i915;
@@ -1945,11 +1943,8 @@ void i915_capture_error_state(struct drm_i915_private *i915,
return;
error = i915_capture_gpu_state(i915);
- if (!error) {
- DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
- i915_disable_error_state(i915, -ENOMEM);
+ if (IS_ERR(error))
return;
- }
i915_error_capture_msg(i915, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1987,7 +1982,7 @@ i915_first_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
- if (error)
+ if (!IS_ERR_OR_NULL(error))
i915_gpu_state_get(error);
spin_unlock_irq(&i915->gpu_error.lock);
@@ -2000,10 +1995,11 @@ void i915_reset_error_state(struct drm_i915_private *i915)
spin_lock_irq(&i915->gpu_error.lock);
error = i915->gpu_error.first_error;
- i915->gpu_error.first_error = NULL;
+ if (error != ERR_PTR(-ENODEV)) /* if disabled, always disabled */
+ i915->gpu_error.first_error = NULL;
spin_unlock_irq(&i915->gpu_error.lock);
- if (!IS_ERR(error))
+ if (!IS_ERR_OR_NULL(error))
i915_gpu_state_put(error);
}
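Across these hunks, gpu_error.first_error becomes a tri-state pointer: NULL (nothing captured), a valid capture, or an ERR_PTR() sentinel meaning capture is disabled for good (for example after an -ENOMEM). A hedged sketch of that sentinel idiom with simplified, illustrative types (example_capture/example_error_state are not the driver's structures):

/* Sketch: one pointer encodes "empty", "captured" and "disabled". */
struct example_capture {
	struct kref ref;
};

struct example_error_state {
	spinlock_t lock;
	struct example_capture *first;	/* NULL, valid, or ERR_PTR(-errno) */
};

static struct example_capture *
example_capture(struct example_error_state *e)
{
	struct example_capture *c = READ_ONCE(e->first);

	if (IS_ERR(c))			/* disabled: propagate the sentinel */
		return c;

	return kzalloc(sizeof(*c), GFP_ATOMIC);
}

static void example_reset(struct example_error_state *e)
{
	spin_lock_irq(&e->lock);
	if (e->first != ERR_PTR(-ENODEV))	/* keep "disabled" sticky */
		e->first = NULL;
	spin_unlock_irq(&e->lock);
}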
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index ff2652bbb0b0..604291f7762d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -45,6 +45,7 @@ struct i915_gpu_state {
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
+ struct intel_runtime_info runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
@@ -270,8 +271,8 @@ struct i915_gpu_error {
#define I915_RESET_BACKOFF 0
#define I915_RESET_HANDOFF 1
#define I915_RESET_MODESET 2
+#define I915_RESET_ENGINE 3
#define I915_WEDGED (BITS_PER_LONG - 1)
-#define I915_RESET_ENGINE (I915_WEDGED - I915_NUM_ENGINES)
/** Number of times an engine has been reset */
u32 reset_engine_count[I915_NUM_ENGINES];
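I915_RESET_ENGINE is now a fixed base (bit 3) with one flag bit per engine above it, rather than being counted down from I915_WEDGED, so per-engine reset serialisation is plain bitops on gpu_error.flags. A small sketch of claiming and releasing such a bit, mirroring the test_and_set_bit()/clear_bit()/wake_up_bit() usage that the i915_irq.c removal above relied on; the wrapper function itself is illustrative:

/* Sketch: claim the per-engine reset bit, reset, then release and wake. */
static int example_reset_one_engine(struct drm_i915_private *i915,
				    struct intel_engine_cs *engine)
{
	unsigned long *flags = &i915->gpu_error.flags;
	int ret;

	if (test_and_set_bit(I915_RESET_ENGINE + engine->id, flags))
		return -EBUSY;		/* someone else is resetting it */

	ret = i915_reset_engine(engine, "example");

	clear_bit(I915_RESET_ENGINE + engine->id, flags);
	wake_up_bit(flags, I915_RESET_ENGINE + engine->id);

	return ret;
}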
@@ -282,6 +283,8 @@ struct i915_gpu_error {
/** Reason for the current *global* reset */
const char *reason;
+ struct mutex wedge_mutex; /* serialises wedging/unwedging */
+
/**
* Waitqueue to signal when a hang is detected. Used for waiters
* to release the struct_mutex for the reset to proceed.
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index e869daf9c8a9..73c3e8f519e8 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,7 +28,6 @@
*/
#include <linux/compat.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index d447d7d508f4..5fd5080c4ccb 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -31,7 +31,6 @@
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -224,10 +223,10 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
- uint32_t mask,
- uint32_t bits)
+ u32 mask,
+ u32 bits)
{
- uint32_t val;
+ u32 val;
lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(bits & ~mask);
@@ -251,8 +250,8 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
* version is also available.
*/
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t mask,
- uint32_t bits)
+ u32 mask,
+ u32 bits)
{
spin_lock_irq(&dev_priv->irq_lock);
i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
@@ -301,10 +300,10 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
+ u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -331,8 +330,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
lockdep_assert_held(&dev_priv->irq_lock);
@@ -346,13 +345,13 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
POSTING_READ_FW(GTIMR);
}
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
{
ilk_update_gt_irq(dev_priv, mask, 0);
}
@@ -391,10 +390,10 @@ static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
+ u32 new_val;
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -578,11 +577,11 @@ void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv)
* @enabled_irq_mask: mask of interrupt bits to enable
*/
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
- uint32_t old_val;
+ u32 new_val;
+ u32 old_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -612,10 +611,10 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
*/
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t new_val;
+ u32 new_val;
lockdep_assert_held(&dev_priv->irq_lock);
@@ -642,10 +641,10 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
* @enabled_irq_mask: mask of interrupt bits to enable
*/
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- uint32_t interrupt_mask,
- uint32_t enabled_irq_mask)
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
- uint32_t sdeimr = I915_READ(SDEIMR);
+ u32 sdeimr = I915_READ(SDEIMR);
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
@@ -950,7 +949,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -1030,7 +1029,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
if (stime)
*stime = ktime_get();
- if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+ if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
/* No obvious pixelcount register. Only query vertical
* scanout position from Display scan line register.
*/
@@ -1090,7 +1089,7 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
else
position += vtotal - vbl_end;
- if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
+ if (IS_GEN(dev_priv, 2) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
*vpos = position;
*hpos = 0;
} else {
@@ -1183,19 +1182,11 @@ static void notify_ring(struct intel_engine_cs *engine)
struct i915_request *waiter = wait->request;
if (waiter &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &waiter->fence.flags) &&
+ !i915_request_signaled(waiter) &&
intel_wait_check_request(wait, waiter))
rq = i915_request_get(waiter);
tsk = wait->tsk;
- } else {
- if (engine->irq_seqno_barrier &&
- i915_seqno_passed(seqno, wait->seqno - 1)) {
- set_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted);
- tsk = wait->tsk;
- }
}
engine->breadcrumbs.irq_count++;
@@ -1376,8 +1367,8 @@ static void ivybridge_parity_work(struct work_struct *work)
container_of(work, typeof(*dev_priv), l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[6];
- uint32_t misccpctl;
- uint8_t slice = 0;
+ u32 misccpctl;
+ u8 slice = 0;
/* We must turn off DOP level clock gating to access the L3 registers.
* In order to prevent a get/put style interface, acquire struct mutex
@@ -1738,13 +1729,13 @@ static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t crc0, uint32_t crc1,
- uint32_t crc2, uint32_t crc3,
- uint32_t crc4)
+ u32 crc0, u32 crc1,
+ u32 crc2, u32 crc3,
+ u32 crc4)
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- uint32_t crcs[5];
+ u32 crcs[5];
spin_lock(&pipe_crc->lock);
/*
@@ -1776,9 +1767,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe,
- uint32_t crc0, uint32_t crc1,
- uint32_t crc2, uint32_t crc3,
- uint32_t crc4) {}
+ u32 crc0, u32 crc1,
+ u32 crc2, u32 crc3,
+ u32 crc4) {}
#endif
@@ -1804,7 +1795,7 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- uint32_t res1, res2;
+ u32 res1, res2;
if (INTEL_GEN(dev_priv) >= 3)
res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
@@ -2547,7 +2538,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
I915_WRITE(SDEIIR, pch_iir);
}
- if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
+ if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
ironlake_rps_change_irq_handler(dev_priv);
}
@@ -2938,46 +2929,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-struct wedge_me {
- struct delayed_work work;
- struct drm_i915_private *i915;
- const char *name;
-};
-
-static void wedge_me(struct work_struct *work)
-{
- struct wedge_me *w = container_of(work, typeof(*w), work.work);
-
- dev_err(w->i915->drm.dev,
- "%s timed out, cancelling all in-flight rendering.\n",
- w->name);
- i915_gem_set_wedged(w->i915);
-}
-
-static void __init_wedge(struct wedge_me *w,
- struct drm_i915_private *i915,
- long timeout,
- const char *name)
-{
- w->i915 = i915;
- w->name = name;
-
- INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me);
- schedule_delayed_work(&w->work, timeout);
-}
-
-static void __fini_wedge(struct wedge_me *w)
-{
- cancel_delayed_work_sync(&w->work);
- destroy_delayed_work_on_stack(&w->work);
- w->i915 = NULL;
-}
-
-#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
- for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \
- (W)->i915; \
- __fini_wedge((W)))
-
static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
@@ -3188,203 +3139,6 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void i915_reset_device(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- const char *reason)
-{
- struct i915_gpu_error *error = &dev_priv->gpu_error;
- struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
- char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
- char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
- char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
- struct wedge_me w;
-
- kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
-
- DRM_DEBUG_DRIVER("resetting chip\n");
- kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
-
- /* Use a watchdog to ensure that our reset completes */
- i915_wedge_on_timeout(&w, dev_priv, 5*HZ) {
- intel_prepare_reset(dev_priv);
-
- error->reason = reason;
- error->stalled_mask = engine_mask;
-
- /* Signal that locked waiters should reset the GPU */
- smp_mb__before_atomic();
- set_bit(I915_RESET_HANDOFF, &error->flags);
- wake_up_all(&error->wait_queue);
-
- /* Wait for anyone holding the lock to wakeup, without
- * blocking indefinitely on struct_mutex.
- */
- do {
- if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
- i915_reset(dev_priv, engine_mask, reason);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- }
- } while (wait_on_bit_timeout(&error->flags,
- I915_RESET_HANDOFF,
- TASK_UNINTERRUPTIBLE,
- 1));
-
- error->stalled_mask = 0;
- error->reason = NULL;
-
- intel_finish_reset(dev_priv);
- }
-
- if (!test_bit(I915_WEDGED, &error->flags))
- kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
-}
-
-void i915_clear_error_registers(struct drm_i915_private *dev_priv)
-{
- u32 eir;
-
- if (!IS_GEN2(dev_priv))
- I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
-
- if (INTEL_GEN(dev_priv) < 4)
- I915_WRITE(IPEIR, I915_READ(IPEIR));
- else
- I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
-
- I915_WRITE(EIR, I915_READ(EIR));
- eir = I915_READ(EIR);
- if (eir) {
- /*
- * some errors might have become stuck,
- * mask them.
- */
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
- }
-
- if (INTEL_GEN(dev_priv) >= 8) {
- I915_WRITE(GEN8_RING_FAULT_REG,
- I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
- POSTING_READ(GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id) {
- I915_WRITE(RING_FAULT_REG(engine),
- I915_READ(RING_FAULT_REG(engine)) &
- ~RING_FAULT_VALID);
- }
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
- }
-}
-
-/**
- * i915_handle_error - handle a gpu error
- * @dev_priv: i915 device private
- * @engine_mask: mask representing engines that are hung
- * @flags: control flags
- * @fmt: Error message format string
- *
- * Do some basic checking of register state at error time and
- * dump it to the syslog. Also call i915_capture_error_state() to make
- * sure we get a record and make it available in debugfs. Fire a uevent
- * so userspace knows something bad happened (should trigger collection
- * of a ring dump etc.).
- */
-void i915_handle_error(struct drm_i915_private *dev_priv,
- u32 engine_mask,
- unsigned long flags,
- const char *fmt, ...)
-{
- struct intel_engine_cs *engine;
- unsigned int tmp;
- char error_msg[80];
- char *msg = NULL;
-
- if (fmt) {
- va_list args;
-
- va_start(args, fmt);
- vscnprintf(error_msg, sizeof(error_msg), fmt, args);
- va_end(args);
-
- msg = error_msg;
- }
-
- /*
- * In most cases it's guaranteed that we get here with an RPM
- * reference held, for example because there is a pending GPU
- * request that won't finish until the reset is done. This
- * isn't the case at least when we get here by doing a
- * simulated reset via debugfs, so get an RPM reference.
- */
- intel_runtime_pm_get(dev_priv);
-
- engine_mask &= INTEL_INFO(dev_priv)->ring_mask;
-
- if (flags & I915_ERROR_CAPTURE) {
- i915_capture_error_state(dev_priv, engine_mask, msg);
- i915_clear_error_registers(dev_priv);
- }
-
- /*
- * Try engine reset when available. We fall back to full reset if
- * single reset fails.
- */
- if (intel_has_reset_engine(dev_priv) &&
- !i915_terminally_wedged(&dev_priv->gpu_error)) {
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
- if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags))
- continue;
-
- if (i915_reset_engine(engine, msg) == 0)
- engine_mask &= ~intel_engine_flag(engine);
-
- clear_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags);
- wake_up_bit(&dev_priv->gpu_error.flags,
- I915_RESET_ENGINE + engine->id);
- }
- }
-
- if (!engine_mask)
- goto out;
-
- /* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) {
- wait_event(dev_priv->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &dev_priv->gpu_error.flags));
- goto out;
- }
-
- /* Prevent any other reset-engine attempt. */
- for_each_engine(engine, dev_priv, tmp) {
- while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags))
- wait_on_bit(&dev_priv->gpu_error.flags,
- I915_RESET_ENGINE + engine->id,
- TASK_UNINTERRUPTIBLE);
- }
-
- i915_reset_device(dev_priv, engine_mask, msg);
-
- for_each_engine(engine, dev_priv, tmp) {
- clear_bit(I915_RESET_ENGINE + engine->id,
- &dev_priv->gpu_error.flags);
- }
-
- clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
- wake_up_all(&dev_priv->gpu_error.reset_queue);
-
-out:
- intel_runtime_pm_put(dev_priv);
-}
-
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
@@ -3417,7 +3171,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3479,7 +3233,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
unsigned long irqflags;
- uint32_t bit = INTEL_GEN(dev_priv) >= 7 ?
+ u32 bit = INTEL_GEN(dev_priv) >= 7 ?
DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -3586,11 +3340,8 @@ static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- if (IS_GEN5(dev_priv))
- I915_WRITE(HWSTAM, 0xffffffff);
-
GEN3_IRQ_RESET(DE);
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
if (IS_HASWELL(dev_priv)) {
@@ -3700,7 +3451,7 @@ static void gen11_irq_reset(struct drm_device *dev)
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
- uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
+ u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -4045,7 +3796,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
gt_irqs |= ILK_BSD_USER_INTERRUPT;
} else {
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
@@ -4169,7 +3920,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
/* These are interrupts we'll toggle with the ring mask register */
- uint32_t gt_interrupts[] = {
+ u32 gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
@@ -4183,9 +3934,6 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
- if (HAS_L3_DPF(dev_priv))
- gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
@@ -4200,8 +3948,8 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
- uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
- uint32_t de_pipe_enables;
+ u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+ u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
u32 de_misc_masked = GEN8_DE_EDP_PSR;
@@ -4368,8 +4116,6 @@ static void i8xx_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- I915_WRITE16(HWSTAM, 0xffff);
-
GEN2_IRQ_RESET();
}
@@ -4537,8 +4283,6 @@ static void i915_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- I915_WRITE(HWSTAM, 0xffffffff);
-
GEN3_IRQ_RESET();
}
@@ -4648,8 +4392,6 @@ static void i965_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- I915_WRITE(HWSTAM, 0xffffffff);
-
GEN3_IRQ_RESET();
}
@@ -4836,7 +4578,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 8)
rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
/* Gen2 doesn't have a hardware frame counter */
dev->max_vblank_count = 0;
} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
@@ -4852,7 +4594,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
* Gen2 doesn't have a hardware frame counter and so depends on
* vblank interrupts to produce sane vblank sequence numbers.
*/
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
@@ -4924,14 +4666,14 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = ironlake_disable_vblank;
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
} else {
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
dev->driver->irq_preinstall = i8xx_irq_reset;
dev->driver->irq_postinstall = i8xx_irq_postinstall;
dev->driver->irq_handler = i8xx_irq_handler;
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_uninstall = i915_irq_reset;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 2e0356561839..9f0539bdaa39 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -77,7 +77,7 @@ i915_param_named(error_capture, bool, 0600,
"triaging and debugging hangs.");
#endif
-i915_param_named_unsafe(enable_hangcheck, bool, 0644,
+i915_param_named_unsafe(enable_hangcheck, bool, 0600,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
@@ -203,3 +203,33 @@ void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
+
+static __always_inline void dup_param(const char *type, void *x)
+{
+ if (!__builtin_strcmp(type, "char *"))
+ *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+}
+
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
+{
+ *dest = *src;
+#define DUP(T, x, ...) dup_param(#T, &dest->x);
+ I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+static __always_inline void free_param(const char *type, void *x)
+{
+ if (!__builtin_strcmp(type, "char *")) {
+ kfree(*(void **)x);
+ *(void **)x = NULL;
+ }
+}
+
+/* free the allocated members, *not* the passed in params itself */
+void i915_params_free(struct i915_params *params)
+{
+#define FREE(T, x, ...) free_param(#T, &params->x);
+ I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
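i915_params_copy()/i915_params_free() replace the open-coded dup_param()/free_param() loops that previously lived in i915_gpu_error.c: every char * member is deep-copied on copy and released on free. A hedged example of taking and dropping a snapshot of the module parameters, in the spirit of how the error-capture code above now uses these helpers:

/* Sketch: snapshot the current modparams, use it, then drop it. */
static void example_params_snapshot(void)
{
	struct i915_params snap;

	i915_params_copy(&snap, &i915_modparams);	/* kstrdup()s char * members */

	/* ... stash 'snap' in an error record, print it, etc. ... */

	i915_params_free(&snap);	/* frees only the duplicated strings */
}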
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 7e56c516c815..6efcf330bdab 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -33,6 +33,15 @@ struct drm_printer;
#define ENABLE_GUC_SUBMISSION BIT(0)
#define ENABLE_GUC_LOAD_HUC BIT(1)
+/*
+ * Invoke param, a function-like macro, for each i915 param, with arguments:
+ *
+ * param(type, name, value)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ */
#define I915_PARAMS_FOR_EACH(param) \
param(char *, vbt_firmware, NULL) \
param(int, modeset, -1) \
@@ -78,6 +87,8 @@ struct i915_params {
extern struct i915_params i915_modparams __read_mostly;
void i915_params_dump(const struct i915_params *params, struct drm_printer *p);
+void i915_params_copy(struct i915_params *dest, const struct i915_params *src);
+void i915_params_free(struct i915_params *params);
#endif
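The new header comment spells out the X-macro contract: I915_PARAMS_FOR_EACH(param) invokes param(type, name, value) once per parameter, so a caller supplies a one-line macro and gets per-member code generated. A hedged illustration of such an expansion (MEMBER and example_params are arbitrary names chosen here):

/* Illustrative expansion: declare one struct member per parameter. */
#define MEMBER(T, name, ...) T name;
struct example_params {
	I915_PARAMS_FOR_EACH(MEMBER)
};
#undef MEMBER

/* e.g. param(char *, vbt_firmware, NULL) expands to:  char *vbt_firmware;  */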
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6350db5503cd..dd4aff2b256e 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -82,6 +82,7 @@
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch_display = 1, \
+ .gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
@@ -122,6 +123,7 @@ static const struct intel_device_info intel_i865g_info = {
GEN(3), \
.num_pipes = 2, \
.display.has_gmch_display = 1, \
+ .gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -198,6 +200,7 @@ static const struct intel_device_info intel_pineview_info = {
.num_pipes = 2, \
.display.has_hotplug = 1, \
.display.has_gmch_display = 1, \
+ .gpu_reset_clobbers_display = true, \
.ring_mask = RENDER_RING, \
.has_snoop = true, \
.has_coherent_ggtt = true, \
@@ -228,6 +231,7 @@ static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
.ring_mask = RENDER_RING | BSD_RING,
+ .gpu_reset_clobbers_display = false,
};
static const struct intel_device_info intel_gm45_info = {
@@ -237,6 +241,7 @@ static const struct intel_device_info intel_gm45_info = {
.display.has_fbc = 1,
.display.supports_tv = 1,
.ring_mask = RENDER_RING | BSD_RING,
+ .gpu_reset_clobbers_display = false,
};
#define GEN5_FEATURES \
@@ -532,7 +537,6 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.display.has_fbc = 1, \
.display.has_psr = 1, \
.has_runtime_pm = 1, \
- .has_pooled_eu = 0, \
.display.has_csr = 1, \
.has_rc6 = 1, \
.display.has_dp_mst = 1, \
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2b2eb57ca71f..727118301f91 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1365,7 +1365,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
oa_put_render_ctx_id(stream);
@@ -1796,7 +1796,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
* be read back from automatically triggered reports, as part of the
* RPT_ID field.
*/
- if (IS_GEN(dev_priv, 9, 11)) {
+ if (IS_GEN_RANGE(dev_priv, 9, 11)) {
I915_WRITE(GEN8_OA_DEBUG,
_MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
@@ -2087,7 +2087,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* In our case we are expecting that taking pm + FORCEWAKE
* references will effectively disable RC6.
*/
- intel_runtime_pm_get(dev_priv);
+ stream->wakeref = intel_runtime_pm_get(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
@@ -2123,7 +2123,7 @@ err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:
if (stream->ctx)
@@ -2646,7 +2646,7 @@ err:
static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent)
{
return div64_u64(1000000000ULL * (2ULL << exponent),
- 1000ULL * INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz);
+ 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz);
}
/**
@@ -3021,7 +3021,7 @@ static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
(addr >= 0x182300 && addr <= 0x1823A4);
}
-static uint32_t mask_reg_value(u32 reg, u32 val)
+static u32 mask_reg_value(u32 reg, u32 val)
{
/* HALF_SLICE_CHICKEN2 is programmed with the
* WaDisableSTUnitPowerOptimization workaround. Make sure the value
@@ -3415,7 +3415,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.read = gen8_oa_read;
dev_priv->perf.oa.ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
- if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 8, 9)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3431,7 +3431,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen8_disable_metric_set;
- if (IS_GEN8(dev_priv)) {
+ if (IS_GEN(dev_priv, 8)) {
dev_priv->perf.oa.ctx_oactxctrl_offset = 0x120;
dev_priv->perf.oa.ctx_flexeu0_offset = 0x2ce;
@@ -3442,7 +3442,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
- } else if (IS_GEN(dev_priv, 10, 11)) {
+ } else if (IS_GEN_RANGE(dev_priv, 10, 11)) {
dev_priv->perf.oa.ops.is_valid_b_counter_reg =
gen7_is_valid_b_counter_addr;
dev_priv->perf.oa.ops.is_valid_mux_reg =
@@ -3471,7 +3471,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = 1000 *
- (INTEL_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
+ (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2);
dev_priv->perf.sysctl_header = register_sysctl_table(dev_root);
mutex_init(&dev_priv->perf.metrics_lock);
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index d6c8f8fdfda5..b1cb2d3cae16 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -167,6 +167,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
bool fw = false;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
@@ -175,7 +176,8 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (!dev_priv->gt.awake)
return;
- if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ if (!wakeref)
return;
for_each_engine(engine, dev_priv, id) {
@@ -210,7 +212,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
if (fw)
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
}
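intel_runtime_pm_get_if_in_use() now returns a wakeref cookie (zero on failure) instead of a bool, and the matching put must be handed that same cookie. A minimal sketch of the conditional-sampling pattern used in the PMU conversions above; the wrapper function is illustrative:

/* Sketch: sample a register only if the device is already awake. */
static u32 example_sample_if_awake(struct drm_i915_private *i915, i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	wakeref = intel_runtime_pm_get_if_in_use(i915);
	if (!wakeref)
		return val;		/* device asleep: skip the read */

	val = I915_READ_NOTRACE(reg);
	intel_runtime_pm_put(i915, wakeref);

	return val;
}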
static void
@@ -227,11 +229,12 @@ frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
u32 val;
val = dev_priv->gt_pm.rps.cur_freq;
- if (dev_priv->gt.awake &&
- intel_runtime_pm_get_if_in_use(dev_priv)) {
- val = intel_get_cagf(dev_priv,
- I915_READ_NOTRACE(GEN6_RPSTAT1));
- intel_runtime_pm_put(dev_priv);
+ if (dev_priv->gt.awake) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm_if_in_use(dev_priv, wakeref)
+ val = intel_get_cagf(dev_priv,
+ I915_READ_NOTRACE(GEN6_RPSTAT1));
}
add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT],
@@ -443,12 +446,14 @@ static u64 __get_rc6(struct drm_i915_private *i915)
static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
+ intel_wakeref_t wakeref;
unsigned long flags;
u64 val;
- if (intel_runtime_pm_get_if_in_use(i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(i915);
+ if (wakeref) {
val = __get_rc6(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
/*
* If we are coming back from being runtime suspended we must
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index fe56465cdfd6..cbcb957b7141 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -13,7 +13,7 @@
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
- const struct sseu_dev_info *sseu = &INTEL_INFO(dev_priv)->sseu;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a7d60509ca7..93cbd057c07a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -117,14 +117,14 @@
*/
typedef struct {
- uint32_t reg;
+ u32 reg;
} i915_reg_t;
#define _MMIO(r) ((const i915_reg_t){ .reg = (r) })
#define INVALID_MMIO_REG _MMIO(0)
-static inline uint32_t i915_mmio_reg_offset(i915_reg_t reg)
+static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
return reg.reg;
}
@@ -139,6 +139,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
+#define VLV_DISPLAY_BASE 0x180000
+#define VLV_MIPI_BASE VLV_DISPLAY_BASE
+#define BXT_MIPI_BASE 0x60000
+
+#define DISPLAY_MMIO_BASE(dev_priv) (INTEL_INFO(dev_priv)->display_mmio_offset)
+
/*
* Given the first two numbers __a and __b of arbitrarily many evenly spaced
* numbers, pick the 0-based __index'th value.
@@ -179,15 +185,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
* Device info offset array based helpers for groups of registers with unevenly
* spaced base offsets.
*/
-#define _MMIO_PIPE2(pipe, reg) _MMIO(dev_priv->info.pipe_offsets[pipe] - \
- dev_priv->info.pipe_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
-#define _MMIO_TRANS2(pipe, reg) _MMIO(dev_priv->info.trans_offsets[(pipe)] - \
- dev_priv->info.trans_offsets[TRANSCODER_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
-#define _CURSOR2(pipe, reg) _MMIO(dev_priv->info.cursor_offsets[(pipe)] - \
- dev_priv->info.cursor_offsets[PIPE_A] + (reg) + \
- dev_priv->info.display_mmio_offset)
+#define _MMIO_PIPE2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->pipe_offsets[pipe] - \
+ INTEL_INFO(dev_priv)->pipe_offsets[PIPE_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
+#define _MMIO_TRANS2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->trans_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
+#define _CURSOR2(pipe, reg) _MMIO(INTEL_INFO(dev_priv)->cursor_offsets[(pipe)] - \
+ INTEL_INFO(dev_priv)->cursor_offsets[PIPE_A] + (reg) + \
+ DISPLAY_MMIO_BASE(dev_priv))
#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
#define _MASKED_FIELD(mask, value) ({ \
@@ -347,6 +353,24 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GRDOM_MEDIA4 (1 << 8)
#define GEN11_GRDOM_VECS (1 << 13)
#define GEN11_GRDOM_VECS2 (1 << 14)
+#define GEN11_GRDOM_SFC0 (1 << 17)
+#define GEN11_GRDOM_SFC1 (1 << 18)
+
+#define GEN11_VCS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << ((instance) >> 1))
+#define GEN11_VECS_SFC_RESET_BIT(instance) (GEN11_GRDOM_SFC0 << (instance))
+
+#define GEN11_VCS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x88C)
+#define GEN11_VCS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_STATUS(engine) _MMIO((engine)->mmio_base + 0x890)
+#define GEN11_VCS_SFC_USAGE_BIT (1 << 0)
+#define GEN11_VCS_SFC_LOCK_ACK_BIT (1 << 1)
+
+#define GEN11_VECS_SFC_FORCED_LOCK(engine) _MMIO((engine)->mmio_base + 0x201C)
+#define GEN11_VECS_SFC_FORCED_LOCK_BIT (1 << 0)
+#define GEN11_VECS_SFC_LOCK_ACK(engine) _MMIO((engine)->mmio_base + 0x2018)
+#define GEN11_VECS_SFC_LOCK_ACK_BIT (1 << 0)
+#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
+#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
@@ -1790,7 +1814,7 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40
#define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40
#define _CNL_PORT_TX_F_LN0_OFFSET 0x162840
-#define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \
+#define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_B_GRP_OFFSET, \
_CNL_PORT_TX_B_GRP_OFFSET, \
@@ -1798,7 +1822,7 @@ enum i915_power_well_id {
_CNL_PORT_TX_AE_GRP_OFFSET, \
_CNL_PORT_TX_F_GRP_OFFSET) + \
4 * (dw))
-#define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \
+#define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \
_CNL_PORT_TX_AE_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
_CNL_PORT_TX_B_LN0_OFFSET, \
@@ -1834,9 +1858,9 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW4_LN0_AE 0x162450
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
-#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4))
-#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4))
-#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \
+#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
+#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
+#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
@@ -1864,8 +1888,12 @@ enum i915_power_well_id {
#define RTERM_SELECT(x) ((x) << 3)
#define RTERM_SELECT_MASK (0x7 << 3)
-#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7))
-#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7))
+#define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
+#define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
+#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
+#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
+#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
+#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
@@ -2592,10 +2620,6 @@ enum i915_power_well_id {
#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3)
-#define VLV_DISPLAY_BASE 0x180000
-#define VLV_MIPI_BASE VLV_DISPLAY_BASE
-#define BXT_MIPI_BASE 0x60000
-
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
@@ -3152,9 +3176,9 @@ enum i915_power_well_id {
/*
* Clock control & power management
*/
-#define _DPLL_A (dev_priv->info.display_mmio_offset + 0x6014)
-#define _DPLL_B (dev_priv->info.display_mmio_offset + 0x6018)
-#define _CHV_DPLL_C (dev_priv->info.display_mmio_offset + 0x6030)
+#define _DPLL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x6014)
+#define _DPLL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x6018)
+#define _CHV_DPLL_C (DISPLAY_MMIO_BASE(dev_priv) + 0x6030)
#define DPLL(pipe) _MMIO_PIPE3((pipe), _DPLL_A, _DPLL_B, _CHV_DPLL_C)
#define VGA0 _MMIO(0x6000)
@@ -3251,9 +3275,9 @@ enum i915_power_well_id {
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
-#define _DPLL_A_MD (dev_priv->info.display_mmio_offset + 0x601c)
-#define _DPLL_B_MD (dev_priv->info.display_mmio_offset + 0x6020)
-#define _CHV_DPLL_C_MD (dev_priv->info.display_mmio_offset + 0x603c)
+#define _DPLL_A_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x601c)
+#define _DPLL_B_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x6020)
+#define _CHV_DPLL_C_MD (DISPLAY_MMIO_BASE(dev_priv) + 0x603c)
#define DPLL_MD(pipe) _MMIO_PIPE3((pipe), _DPLL_A_MD, _DPLL_B_MD, _CHV_DPLL_C_MD)
/*
@@ -3325,7 +3349,7 @@ enum i915_power_well_id {
#define DSTATE_PLL_D3_OFF (1 << 3)
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D _MMIO(dev_priv->info.display_mmio_offset + 0x6200)
+#define DSPCLK_GATE_D _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x6200)
# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
@@ -3465,7 +3489,7 @@ enum i915_power_well_id {
#define _PALETTE_A 0xa000
#define _PALETTE_B 0xa800
#define _CHV_PALETTE_C 0xc000
-#define PALETTE(pipe, i) _MMIO(dev_priv->info.display_mmio_offset + \
+#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
_PICK((pipe), _PALETTE_A, \
_PALETTE_B, _CHV_PALETTE_C) + \
(i) * 4)
@@ -4298,7 +4322,7 @@ enum {
/* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN _MMIO(dev_priv->info.display_mmio_offset + 0x61110)
+#define PORT_HOTPLUG_EN _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
#define PORTC_HOTPLUG_INT_EN (1 << 28)
#define PORTD_HOTPLUG_INT_EN (1 << 27)
@@ -4328,7 +4352,7 @@ enum {
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
-#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
+#define PORT_HOTPLUG_STAT _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61114)
/*
* HDMI/DP bits are g4x+
*
@@ -4410,7 +4434,7 @@ enum {
#define PORT_DFT_I9XX _MMIO(0x61150)
#define DC_BALANCE_RESET (1 << 25)
-#define PORT_DFT2_G4X _MMIO(dev_priv->info.display_mmio_offset + 0x61154)
+#define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
@@ -4663,7 +4687,6 @@ enum {
#define EDP_FORCE_VDD (1 << 3)
#define EDP_BLC_ENABLE (1 << 2)
#define PANEL_POWER_RESET (1 << 1)
-#define PANEL_POWER_OFF (0 << 0)
#define PANEL_POWER_ON (1 << 0)
#define _PP_ON_DELAYS 0x61208
@@ -4695,7 +4718,7 @@ enum {
#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
/* Panel fitting */
-#define PFIT_CONTROL _MMIO(dev_priv->info.display_mmio_offset + 0x61230)
+#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@@ -4713,7 +4736,7 @@ enum {
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
-#define PFIT_PGM_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61234)
+#define PFIT_PGM_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@@ -4725,25 +4748,25 @@ enum {
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
-#define PFIT_AUTO_RATIOS _MMIO(dev_priv->info.display_mmio_offset + 0x61238)
+#define PFIT_AUTO_RATIOS _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61238)
-#define _VLV_BLC_PWM_CTL2_A (dev_priv->info.display_mmio_offset + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (dev_priv->info.display_mmio_offset + 0x61350)
+#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
_VLV_BLC_PWM_CTL2_B)
-#define _VLV_BLC_PWM_CTL_A (dev_priv->info.display_mmio_offset + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (dev_priv->info.display_mmio_offset + 0x61354)
+#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
_VLV_BLC_PWM_CTL_B)
-#define _VLV_BLC_HIST_CTL_A (dev_priv->info.display_mmio_offset + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (dev_priv->info.display_mmio_offset + 0x61360)
+#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
_VLV_BLC_HIST_CTL_B)
/* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(dev_priv->info.display_mmio_offset + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
@@ -4766,7 +4789,7 @@ enum {
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61254)
+#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
@@ -4788,7 +4811,7 @@ enum {
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-#define BLC_HIST_CTL _MMIO(dev_priv->info.display_mmio_offset + 0x61260)
+#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
#define BLM_HISTOGRAM_ENABLE (1 << 31)
/* New registers for PCH-split platforms. Safe where new bits show up, the
@@ -5412,47 +5435,47 @@ enum {
* is 20 bytes in each direction, hence the 5 fixed
* data registers
*/
-#define _DPA_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64010)
-#define _DPA_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64014)
-#define _DPA_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64018)
-#define _DPA_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6401c)
-#define _DPA_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64020)
-#define _DPA_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64024)
-
-#define _DPB_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64110)
-#define _DPB_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64114)
-#define _DPB_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64118)
-#define _DPB_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6411c)
-#define _DPB_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64120)
-#define _DPB_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64124)
-
-#define _DPC_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64210)
-#define _DPC_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64214)
-#define _DPC_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64218)
-#define _DPC_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6421c)
-#define _DPC_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64220)
-#define _DPC_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64224)
-
-#define _DPD_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64310)
-#define _DPD_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64314)
-#define _DPD_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64318)
-#define _DPD_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6431c)
-#define _DPD_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64320)
-#define _DPD_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64324)
-
-#define _DPE_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64410)
-#define _DPE_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64414)
-#define _DPE_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64418)
-#define _DPE_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6441c)
-#define _DPE_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64420)
-#define _DPE_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64424)
-
-#define _DPF_AUX_CH_CTL (dev_priv->info.display_mmio_offset + 0x64510)
-#define _DPF_AUX_CH_DATA1 (dev_priv->info.display_mmio_offset + 0x64514)
-#define _DPF_AUX_CH_DATA2 (dev_priv->info.display_mmio_offset + 0x64518)
-#define _DPF_AUX_CH_DATA3 (dev_priv->info.display_mmio_offset + 0x6451c)
-#define _DPF_AUX_CH_DATA4 (dev_priv->info.display_mmio_offset + 0x64520)
-#define _DPF_AUX_CH_DATA5 (dev_priv->info.display_mmio_offset + 0x64524)
+#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
+#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
+#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018)
+#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c)
+#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020)
+#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024)
+
+#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118)
+#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c)
+#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120)
+#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124)
+
+#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210)
+#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214)
+#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218)
+#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c)
+#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220)
+#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224)
+
+#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310)
+#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314)
+#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318)
+#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c)
+#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320)
+#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324)
+
+#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410)
+#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414)
+#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418)
+#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c)
+#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420)
+#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424)
+
+#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510)
+#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514)
+#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518)
+#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c)
+#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520)
+#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524)
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL)
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
@@ -5728,7 +5751,7 @@ enum {
#define DPINVGTT_STATUS_MASK 0xff
#define DPINVGTT_STATUS_MASK_CHV 0xfff
-#define DSPARB _MMIO(dev_priv->info.display_mmio_offset + 0x70030)
+#define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DSPARB_CSTART_SHIFT 7
#define DSPARB_BSTART_MASK (0x7f)
@@ -5763,7 +5786,7 @@ enum {
#define DSPARB_SPRITEF_MASK_VLV (0xff << 8)
/* pnv/gen4/g4x/vlv/chv */
-#define DSPFW1 _MMIO(dev_priv->info.display_mmio_offset + 0x70034)
+#define DSPFW1 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff << 23)
#define DSPFW_CURSORB_SHIFT 16
@@ -5774,7 +5797,7 @@ enum {
#define DSPFW_PLANEA_SHIFT 0
#define DSPFW_PLANEA_MASK (0x7f << 0)
#define DSPFW_PLANEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW2 _MMIO(dev_priv->info.display_mmio_offset + 0x70038)
+#define DSPFW2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70038)
#define DSPFW_FBC_SR_EN (1 << 31) /* g4x */
#define DSPFW_FBC_SR_SHIFT 28
#define DSPFW_FBC_SR_MASK (0x7 << 28) /* g4x */
@@ -5790,7 +5813,7 @@ enum {
#define DSPFW_SPRITEA_SHIFT 0
#define DSPFW_SPRITEA_MASK (0x7f << 0) /* g4x */
#define DSPFW_SPRITEA_MASK_VLV (0xff << 0) /* vlv/chv */
-#define DSPFW3 _MMIO(dev_priv->info.display_mmio_offset + 0x7003c)
+#define DSPFW3 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x7003c)
#define DSPFW_HPLL_SR_EN (1 << 31)
#define PINEVIEW_SELF_REFRESH_EN (1 << 30)
#define DSPFW_CURSOR_SR_SHIFT 24
@@ -6206,35 +6229,35 @@ enum {
* [10:1f] all
* [30:32] all
*/
-#define SWF0(i) _MMIO(dev_priv->info.display_mmio_offset + 0x70410 + (i) * 4)
-#define SWF1(i) _MMIO(dev_priv->info.display_mmio_offset + 0x71410 + (i) * 4)
-#define SWF3(i) _MMIO(dev_priv->info.display_mmio_offset + 0x72414 + (i) * 4)
+#define SWF0(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70410 + (i) * 4)
+#define SWF1(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x71410 + (i) * 4)
+#define SWF3(i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
/* Pipe B */
-#define _PIPEBDSL (dev_priv->info.display_mmio_offset + 0x71000)
-#define _PIPEBCONF (dev_priv->info.display_mmio_offset + 0x71008)
-#define _PIPEBSTAT (dev_priv->info.display_mmio_offset + 0x71024)
+#define _PIPEBDSL (DISPLAY_MMIO_BASE(dev_priv) + 0x71000)
+#define _PIPEBCONF (DISPLAY_MMIO_BASE(dev_priv) + 0x71008)
+#define _PIPEBSTAT (DISPLAY_MMIO_BASE(dev_priv) + 0x71024)
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
-#define _PIPEB_FRMCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71040)
-#define _PIPEB_FLIPCOUNT_G4X (dev_priv->info.display_mmio_offset + 0x71044)
+#define _PIPEB_FRMCOUNT_G4X (DISPLAY_MMIO_BASE(dev_priv) + 0x71040)
+#define _PIPEB_FLIPCOUNT_G4X (DISPLAY_MMIO_BASE(dev_priv) + 0x71044)
/* Display B control */
-#define _DSPBCNTR (dev_priv->info.display_mmio_offset + 0x71180)
+#define _DSPBCNTR (DISPLAY_MMIO_BASE(dev_priv) + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
-#define _DSPBADDR (dev_priv->info.display_mmio_offset + 0x71184)
-#define _DSPBSTRIDE (dev_priv->info.display_mmio_offset + 0x71188)
-#define _DSPBPOS (dev_priv->info.display_mmio_offset + 0x7118C)
-#define _DSPBSIZE (dev_priv->info.display_mmio_offset + 0x71190)
-#define _DSPBSURF (dev_priv->info.display_mmio_offset + 0x7119C)
-#define _DSPBTILEOFF (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBOFFSET (dev_priv->info.display_mmio_offset + 0x711A4)
-#define _DSPBSURFLIVE (dev_priv->info.display_mmio_offset + 0x711AC)
+#define _DSPBADDR (DISPLAY_MMIO_BASE(dev_priv) + 0x71184)
+#define _DSPBSTRIDE (DISPLAY_MMIO_BASE(dev_priv) + 0x71188)
+#define _DSPBPOS (DISPLAY_MMIO_BASE(dev_priv) + 0x7118C)
+#define _DSPBSIZE (DISPLAY_MMIO_BASE(dev_priv) + 0x71190)
+#define _DSPBSURF (DISPLAY_MMIO_BASE(dev_priv) + 0x7119C)
+#define _DSPBTILEOFF (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBOFFSET (DISPLAY_MMIO_BASE(dev_priv) + 0x711A4)
+#define _DSPBSURFLIVE (DISPLAY_MMIO_BASE(dev_priv) + 0x711AC)
/* ICL DSI 0 and 1 */
#define _PIPEDSI0CONF 0x7b008
@@ -8786,7 +8809,7 @@ enum {
#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2)
/* Audio */
-#define G4X_AUD_VID_DID _MMIO(dev_priv->info.display_mmio_offset + 0x62020)
+#define G4X_AUD_VID_DID _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
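One functional detail worth calling out in the i915_reg.h changes above: the _CNL_PORT_TX_DW_GRP()/_CNL_PORT_TX_DW_LN0() helpers now take the dword index first and the port second, matching the dword-first order already used by the _ICL_PORT_TX_DW_* helpers, and every user in this file is updated to the new order, e.g.:

	#define CNL_PORT_TX_DW4_GRP(port)	_MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
	#define ICL_PORT_TX_DW7_GRP(port)	_MMIO(_ICL_PORT_TX_DW_GRP(7, port))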
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index cefefc11d922..f941e40fd373 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -29,6 +29,7 @@
#include <linux/sched/signal.h>
#include "i915_drv.h"
+#include "i915_reset.h"
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
@@ -111,99 +112,10 @@ i915_request_remove_from_client(struct i915_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+static void reserve_gt(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- struct i915_timeline *timeline;
- enum intel_engine_id id;
- int ret;
-
- /* Carefully retire all requests without writing to the rings */
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (ret)
- return ret;
-
- GEM_BUG_ON(i915->gt.active_requests);
-
- /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- for_each_engine(engine, i915, id) {
- GEM_TRACE("%s seqno %d (current %d) -> %d\n",
- engine->name,
- engine->timeline.seqno,
- intel_engine_get_seqno(engine),
- seqno);
-
- if (seqno == engine->timeline.seqno)
- continue;
-
- kthread_park(engine->breadcrumbs.signaler);
-
- if (!i915_seqno_passed(seqno, engine->timeline.seqno)) {
- /* Flush any waiters before we reuse the seqno */
- intel_engine_disarm_breadcrumbs(engine);
- intel_engine_init_hangcheck(engine);
- GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals));
- }
-
- /* Check we are idle before we fiddle with hw state! */
- GEM_BUG_ON(!intel_engine_is_idle(engine));
- GEM_BUG_ON(i915_gem_active_isset(&engine->timeline.last_request));
-
- /* Finally reset hw state */
- intel_engine_init_global_seqno(engine, seqno);
- engine->timeline.seqno = seqno;
-
- kthread_unpark(engine->breadcrumbs.signaler);
- }
-
- list_for_each_entry(timeline, &i915->gt.timelines, link)
- memset(timeline->global_sync, 0, sizeof(timeline->global_sync));
-
- i915->gt.request_serial = seqno;
-
- return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
-{
- struct drm_i915_private *i915 = to_i915(dev);
-
- lockdep_assert_held(&i915->drm.struct_mutex);
-
- if (seqno == 0)
- return -EINVAL;
-
- /* HWS page needs to be set less than what we will inject to ring */
- return reset_all_global_seqno(i915, seqno - 1);
-}
-
-static int reserve_gt(struct drm_i915_private *i915)
-{
- int ret;
-
- /*
- * Reservation is fine until we may need to wrap around
- *
- * By incrementing the serial for every request, we know that no
- * individual engine may exceed that serial (as each is reset to 0
- * on any wrap). This protects even the most pessimistic of migrations
- * of every request from all engines onto just one.
- */
- while (unlikely(++i915->gt.request_serial == 0)) {
- ret = reset_all_global_seqno(i915, 0);
- if (ret) {
- i915->gt.request_serial--;
- return ret;
- }
- }
-
if (!i915->gt.active_requests++)
i915_gem_unpark(i915);
-
- return 0;
}
static void unreserve_gt(struct drm_i915_private *i915)
@@ -286,7 +198,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
spin_unlock(&engine->timeline.lock);
spin_lock(&rq->lock);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ if (!i915_request_signaled(rq))
dma_fence_signal_locked(&rq->fence);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
intel_engine_cancel_signaling(rq);
@@ -431,6 +343,13 @@ static void move_to_timeline(struct i915_request *request,
spin_unlock(&request->timeline->lock);
}
+static u32 next_global_seqno(struct i915_timeline *tl)
+{
+ if (!++tl->seqno)
+ ++tl->seqno;
+ return tl->seqno;
+}
+
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -447,7 +366,7 @@ void __i915_request_submit(struct i915_request *request)
GEM_BUG_ON(request->global_seqno);
- seqno = timeline_get_seqno(&engine->timeline);
+ seqno = next_global_seqno(&engine->timeline);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(intel_engine_signaled(engine, seqno));
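The new next_global_seqno() helper never hands out 0, since 0 is reserved to mean "no global seqno assigned" (the GEM_BUG_ON(!seqno) in the hunk just above relies on this). A self-contained sketch of the wrap behaviour, outside the driver, assuming a plain 32-bit counter:

	#include <assert.h>
	#include <stdint.h>

	static uint32_t next_seqno(uint32_t *seqno)
	{
		if (!++*seqno)		/* on wrap from 0xffffffff to 0 ... */
			++*seqno;	/* ... skip 0 and hand out 1 instead */
		return *seqno;
	}

	int main(void)
	{
		uint32_t seqno = UINT32_MAX;	/* about to wrap */

		assert(next_seqno(&seqno) == 1);	/* 0 is never returned */
		assert(next_seqno(&seqno) == 2);
		return 0;
	}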
@@ -566,6 +485,38 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static void ring_retire_requests(struct intel_ring *ring)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
+ if (!i915_request_completed(rq))
+ break;
+
+ i915_request_retire(rq);
+ }
+}
+
+static noinline struct i915_request *
+i915_request_alloc_slow(struct intel_context *ce)
+{
+ struct intel_ring *ring = ce->ring;
+ struct i915_request *rq;
+
+ if (list_empty(&ring->request_list))
+ goto out;
+
+ /* Ratelimit ourselves to prevent oom from malicious clients */
+ rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+ cond_synchronize_rcu(rq->rcustate);
+
+ /* Retire our old requests in the hope that we free some */
+ ring_retire_requests(ring);
+
+out:
+ return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
+}
+
/**
* i915_request_alloc - allocate a request structure
*
@@ -608,13 +559,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
if (IS_ERR(ce))
return ERR_CAST(ce);
- ret = reserve_gt(i915);
- if (ret)
- goto err_unpin;
-
- ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
- if (ret)
- goto err_unreserve;
+ reserve_gt(i915);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -654,15 +599,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq = kmem_cache_alloc(i915->requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
- i915_retire_requests(i915);
-
- /* Ratelimit ourselves to prevent oom from malicious clients */
- rq = i915_gem_active_raw(&ce->ring->timeline->last_request,
- &i915->drm.struct_mutex);
- if (rq)
- cond_synchronize_rcu(rq->rcustate);
-
- rq = kmem_cache_alloc(i915->requests, GFP_KERNEL);
+ rq = i915_request_alloc_slow(ce);
if (!rq) {
ret = -ENOMEM;
goto err_unreserve;
@@ -707,9 +644,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* i915_request_add() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
+ *
+ * Note that due to how we add reserved_space to intel_ring_begin()
+	 * we need to double our reservation to ensure that, if we need to wrap
+	 * around inside i915_request_add(), there is sufficient space at
+	 * the beginning of the ring as well.
*/
- rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
- GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+ rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32);
/*
* Record the position of the start of the request so that
@@ -719,11 +660,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- /* Unconditionally invalidate GPU caches and TLBs. */
- ret = engine->emit_flush(rq, EMIT_INVALIDATE);
- if (ret)
- goto err_unwind;
-
ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
@@ -748,7 +684,6 @@ err_unwind:
kmem_cache_free(i915->requests, rq);
err_unreserve:
unreserve_gt(i915);
-err_unpin:
intel_context_unpin(ce);
return ERR_PTR(ret);
}
@@ -776,34 +711,12 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- return ret < 0 ? ret : 0;
- }
-
- if (to->engine->semaphore.sync_to) {
- u32 seqno;
-
- GEM_BUG_ON(!from->engine->semaphore.signal);
-
- seqno = i915_request_global_seqno(from);
- if (!seqno)
- goto await_dma_fence;
-
- if (seqno <= to->timeline->global_sync[from->engine->id])
- return 0;
-
- trace_i915_gem_ring_sync_to(to, from);
- ret = to->engine->semaphore.sync_to(to, from);
- if (ret)
- return ret;
-
- to->timeline->global_sync[from->engine->id] = seqno;
- return 0;
+ } else {
+ ret = i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
}
-await_dma_fence:
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
return ret < 0 ? ret : 0;
}
@@ -979,8 +892,8 @@ void i915_request_add(struct i915_request *request)
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
+ GEM_BUG_ON(request->reserved_space > request->ring->space);
request->reserved_space = 0;
- engine->emit_flush(request, EMIT_FLUSH);
/*
* Record the position of the start of the breadcrumb so that
@@ -1298,13 +1211,7 @@ restart:
set_current_state(state);
wakeup:
- /*
- * Carefully check if the request is complete, giving time
- * for the seqno to be visible following the interrupt.
- * We also have to check in case we are kicked by the GPU
- * reset in order to drop the struct_mutex.
- */
- if (__i915_request_irq_complete(rq))
+ if (i915_request_completed(rq))
break;
/*
@@ -1343,19 +1250,6 @@ complete:
return timeout;
}
-static void ring_retire_requests(struct intel_ring *ring)
-{
- struct i915_request *request, *next;
-
- list_for_each_entry_safe(request, next,
- &ring->request_list, ring_link) {
- if (!i915_request_completed(request))
- break;
-
- i915_request_retire(request);
- }
-}
-
void i915_retire_requests(struct drm_i915_private *i915)
{
struct intel_ring *ring, *tmp;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 90e9d170a0cd..c0f084ca4f29 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -30,7 +30,6 @@
#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_sw_fence.h"
-#include "i915_scheduler.h"
#include <uapi/drm/i915_drm.h>
@@ -281,6 +280,11 @@ long i915_request_wait(struct i915_request *rq,
#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */
#define I915_WAIT_FOR_IDLE_BOOST BIT(4)
+static inline bool i915_request_signaled(const struct i915_request *rq)
+{
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
+}
+
static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
u32 seqno);
static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
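The new i915_request_signaled() helper simply wraps the DMA_FENCE_FLAG_SIGNALED_BIT test; the i915_request.c hunk earlier in this diff uses it when retiring to avoid signalling an already-signalled fence, roughly:

	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);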
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
new file mode 100644
index 000000000000..342d9ee42601
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -0,0 +1,1389 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#include <linux/sched/mm.h>
+
+#include "i915_drv.h"
+#include "i915_gpu_error.h"
+#include "i915_reset.h"
+
+#include "intel_guc.h"
+
+static void engine_skip_context(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ struct i915_gem_context *hung_ctx = rq->gem_context;
+ struct i915_timeline *timeline = rq->timeline;
+ unsigned long flags;
+
+ GEM_BUG_ON(timeline == &engine->timeline);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ spin_lock(&timeline->lock);
+
+ list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+ if (rq->gem_context == hung_ctx)
+ i915_request_skip(rq, -EIO);
+
+ list_for_each_entry(rq, &timeline->requests, link)
+ i915_request_skip(rq, -EIO);
+
+ spin_unlock(&timeline->lock);
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void client_mark_guilty(struct drm_i915_file_private *file_priv,
+ const struct i915_gem_context *ctx)
+{
+ unsigned int score;
+ unsigned long prev_hang;
+
+ if (i915_gem_context_is_banned(ctx))
+ score = I915_CLIENT_SCORE_CONTEXT_BAN;
+ else
+ score = 0;
+
+ prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+ if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+ score += I915_CLIENT_SCORE_HANG_FAST;
+
+ if (score) {
+ atomic_add(score, &file_priv->ban_score);
+
+ DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
+ }
+}
+
+static void context_mark_guilty(struct i915_gem_context *ctx)
+{
+ unsigned int score;
+ bool banned, bannable;
+
+ atomic_inc(&ctx->guilty_count);
+
+ bannable = i915_gem_context_is_bannable(ctx);
+ score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+ banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+
+ /* Cool contexts don't accumulate client ban score */
+ if (!bannable)
+ return;
+
+ if (banned) {
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score);
+ i915_gem_context_set_banned(ctx);
+ }
+
+ if (!IS_ERR_OR_NULL(ctx->file_priv))
+ client_mark_guilty(ctx->file_priv, ctx);
+}
+
+static void context_mark_innocent(struct i915_gem_context *ctx)
+{
+ atomic_inc(&ctx->active_count);
+}
+
+static void gen3_stop_engine(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ const u32 base = engine->mmio_base;
+
+ if (intel_engine_stop_cs(engine))
+ DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
+
+ I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
+ POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
+
+ I915_WRITE_FW(RING_HEAD(base), 0);
+ I915_WRITE_FW(RING_TAIL(base), 0);
+ POSTING_READ_FW(RING_TAIL(base));
+
+ /* The ring must be empty before it is disabled */
+ I915_WRITE_FW(RING_CTL(base), 0);
+
+ /* Check acts as a post */
+ if (I915_READ_FW(RING_HEAD(base)) != 0)
+ DRM_DEBUG_DRIVER("%s: ring head not parked\n",
+ engine->name);
+}
+
+static void i915_stop_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ if (INTEL_GEN(i915) < 3)
+ return;
+
+ for_each_engine_masked(engine, i915, engine_mask, id)
+ gen3_stop_engine(engine);
+}
+
+static bool i915_in_reset(struct pci_dev *pdev)
+{
+ u8 gdrst;
+
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+ return gdrst & GRDOM_RESET_STATUS;
+}
+
+static int i915_do_reset(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+ int err;
+
+ /* Assert reset for at least 20 usec, and wait for acknowledgement. */
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ usleep_range(50, 200);
+ err = wait_for(i915_in_reset(pdev), 500);
+
+ /* Clear the reset request. */
+ pci_write_config_byte(pdev, I915_GDRST, 0);
+ usleep_range(50, 200);
+ if (!err)
+ err = wait_for(!i915_in_reset(pdev), 500);
+
+ return err;
+}
+
+static bool g4x_reset_complete(struct pci_dev *pdev)
+{
+ u8 gdrst;
+
+ pci_read_config_byte(pdev, I915_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int g33_do_reset(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+ return wait_for(g4x_reset_complete(pdev), 500);
+}
+
+static int g4x_do_reset(struct drm_i915_private *dev_priv,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ int ret;
+
+ /* WaVcpClkGateDisableForMediaReset:ctg,elk */
+ I915_WRITE(VDECCLK_GATE_D,
+ I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ goto out;
+ }
+
+ pci_write_config_byte(pdev, I915_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(g4x_reset_complete(pdev), 500);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ goto out;
+ }
+
+out:
+ pci_write_config_byte(pdev, I915_GDRST, 0);
+
+ I915_WRITE(VDECCLK_GATE_D,
+ I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
+ POSTING_READ(VDECCLK_GATE_D);
+
+ return ret;
+}
+
+static int ironlake_do_reset(struct drm_i915_private *dev_priv,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ int ret;
+
+ I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = intel_wait_for_register(dev_priv,
+ ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+ 500);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ goto out;
+ }
+
+ I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = intel_wait_for_register(dev_priv,
+ ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
+ 500);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ goto out;
+ }
+
+out:
+ I915_WRITE(ILK_GDSR, 0);
+ POSTING_READ(ILK_GDSR);
+ return ret;
+}
+
+/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
+static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+ u32 hw_domain_mask)
+{
+ int err;
+
+ /*
+ * GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
+
+ /* Wait for the device to ack the reset requests */
+ err = __intel_wait_for_register_fw(dev_priv,
+ GEN6_GDRST, hw_domain_mask, 0,
+ 500, 0,
+ NULL);
+ if (err)
+ DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
+
+ return err;
+}
+
+static int gen6_reset_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct intel_engine_cs *engine;
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN6_GRDOM_RENDER,
+ [BCS] = GEN6_GRDOM_BLT,
+ [VCS] = GEN6_GRDOM_MEDIA,
+ [VCS2] = GEN8_GRDOM_MEDIA2,
+ [VECS] = GEN6_GRDOM_VECS,
+ };
+ u32 hw_mask;
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN6_GRDOM_FULL;
+ } else {
+ unsigned int tmp;
+
+ hw_mask = 0;
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
+ hw_mask |= hw_engine_mask[engine->id];
+ }
+
+ return gen6_hw_domain_reset(i915, hw_mask);
+}
+
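All of the GEN6+ reset paths in this new file (gen6_reset_engines() above, plus gen11_reset_engines() and intel_reset_guc() below) funnel into gen6_hw_domain_reset() with an ORed GRDOM mask. As a sketch, resetting just the render and blitter engines on a GEN6 part would amount to something like (mask values as in the hw_engine_mask table above):

	u32 hw_mask = GEN6_GRDOM_RENDER | GEN6_GRDOM_BLT;

	err = gen6_hw_domain_reset(i915, hw_mask);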
+static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
+ u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
+ i915_reg_t sfc_usage;
+ u32 sfc_usage_bit;
+ u32 sfc_reset_bit;
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+ return 0;
+
+ sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+
+ sfc_forced_lock_ack = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_forced_lock_ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
+
+ sfc_usage = GEN11_VCS_SFC_LOCK_STATUS(engine);
+ sfc_usage_bit = GEN11_VCS_SFC_USAGE_BIT;
+ sfc_reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+
+ sfc_forced_lock_ack = GEN11_VECS_SFC_LOCK_ACK(engine);
+ sfc_forced_lock_ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
+
+ sfc_usage = GEN11_VECS_SFC_USAGE(engine);
+ sfc_usage_bit = GEN11_VECS_SFC_USAGE_BIT;
+ sfc_reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
+ break;
+
+ default:
+ return 0;
+ }
+
+ /*
+ * Tell the engine that a software reset is going to happen. The engine
+ * will then try to force lock the SFC (if currently locked, it will
+ * remain so until we tell the engine it is safe to unlock; if currently
+ * unlocked, it will ignore this and all new lock requests). If SFC
+ * ends up being locked to the engine we want to reset, we have to reset
+ * it as well (we will unlock it once the reset sequence is completed).
+ */
+ I915_WRITE_FW(sfc_forced_lock,
+ I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+
+ if (__intel_wait_for_register_fw(dev_priv,
+ sfc_forced_lock_ack,
+ sfc_forced_lock_ack_bit,
+ sfc_forced_lock_ack_bit,
+ 1000, 0, NULL)) {
+ DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ return 0;
+ }
+
+ if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+ return sfc_reset_bit;
+
+ return 0;
+}
+
+static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
+ struct intel_engine_cs *engine)
+{
+ u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ i915_reg_t sfc_forced_lock;
+ u32 sfc_forced_lock_bit;
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
+ return;
+
+ sfc_forced_lock = GEN11_VCS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ sfc_forced_lock = GEN11_VECS_SFC_FORCED_LOCK(engine);
+ sfc_forced_lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
+ break;
+
+ default:
+ return;
+ }
+
+ I915_WRITE_FW(sfc_forced_lock,
+ I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
+}
+
+static int gen11_reset_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ const u32 hw_engine_mask[I915_NUM_ENGINES] = {
+ [RCS] = GEN11_GRDOM_RENDER,
+ [BCS] = GEN11_GRDOM_BLT,
+ [VCS] = GEN11_GRDOM_MEDIA,
+ [VCS2] = GEN11_GRDOM_MEDIA2,
+ [VCS3] = GEN11_GRDOM_MEDIA3,
+ [VCS4] = GEN11_GRDOM_MEDIA4,
+ [VECS] = GEN11_GRDOM_VECS,
+ [VECS2] = GEN11_GRDOM_VECS2,
+ };
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
+ u32 hw_mask;
+ int ret;
+
+ BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
+
+ if (engine_mask == ALL_ENGINES) {
+ hw_mask = GEN11_GRDOM_FULL;
+ } else {
+ hw_mask = 0;
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ hw_mask |= hw_engine_mask[engine->id];
+ hw_mask |= gen11_lock_sfc(i915, engine);
+ }
+ }
+
+ ret = gen6_hw_domain_reset(i915, hw_mask);
+
+ if (engine_mask != ALL_ENGINES)
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
+ gen11_unlock_sfc(i915, engine);
+
+ return ret;
+}
+
+static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+
+ ret = __intel_wait_for_register_fw(dev_priv,
+ RING_RESET_CTL(engine->mmio_base),
+ RESET_CTL_READY_TO_RESET,
+ RESET_CTL_READY_TO_RESET,
+ 700, 0,
+ NULL);
+ if (ret)
+ DRM_ERROR("%s: reset request timeout\n", engine->name);
+
+ return ret;
+}
+
+static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+}
+
+static int gen8_reset_engines(struct drm_i915_private *i915,
+ unsigned int engine_mask,
+ unsigned int retry)
+{
+ struct intel_engine_cs *engine;
+ const bool reset_non_ready = retry >= 1;
+ unsigned int tmp;
+ int ret;
+
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ ret = gen8_engine_reset_prepare(engine);
+ if (ret && !reset_non_ready)
+ goto skip_reset;
+
+ /*
+ * If this is not the first failed attempt to prepare,
+ * we decide to proceed anyway.
+ *
+		 * By doing so we risk context corruption and, on
+		 * some gens (kbl), a possible system hang if the reset
+		 * happens during active batchbuffer execution.
+		 *
+		 * We would rather risk context corruption than a
+		 * failed reset with a wedged driver/gpu. The active
+		 * batchbuffer execution case should be covered by the
+		 * i915_stop_engines() call we make before the reset.
+ */
+ }
+
+ if (INTEL_GEN(i915) >= 11)
+ ret = gen11_reset_engines(i915, engine_mask, retry);
+ else
+ ret = gen6_reset_engines(i915, engine_mask, retry);
+
+skip_reset:
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
+ gen8_engine_reset_cancel(engine);
+
+ return ret;
+}
+
+typedef int (*reset_func)(struct drm_i915_private *,
+ unsigned int engine_mask,
+ unsigned int retry);
+
+static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
+{
+ if (!i915_modparams.reset)
+ return NULL;
+
+ if (INTEL_GEN(i915) >= 8)
+ return gen8_reset_engines;
+ else if (INTEL_GEN(i915) >= 6)
+ return gen6_reset_engines;
+ else if (INTEL_GEN(i915) >= 5)
+ return ironlake_do_reset;
+ else if (IS_G4X(i915))
+ return g4x_do_reset;
+ else if (IS_G33(i915) || IS_PINEVIEW(i915))
+ return g33_do_reset;
+ else if (INTEL_GEN(i915) >= 3)
+ return i915_do_reset;
+ else
+ return NULL;
+}
+
+int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+{
+ reset_func reset = intel_get_gpu_reset(i915);
+ int retry;
+ int ret;
+
+ /*
+ * We want to perform per-engine reset from atomic context (e.g.
+ * softirq), which imposes the constraint that we cannot sleep.
+ * However, experience suggests that spending a bit of time waiting
+ * for a reset helps in various cases, so for a full-device reset
+ * we apply the opposite rule and wait if we want to. As we should
+ * always follow up a failed per-engine reset with a full device reset,
+ * being a little faster, stricter and more error prone for the
+ * atomic case seems an acceptable compromise.
+ *
+ * Unfortunately this leads to a bimodal routine, when the goal was
+ * to have a single reset function that worked for resetting any
+ * number of engines simultaneously.
+ */
+ might_sleep_if(engine_mask == ALL_ENGINES);
+
+ /*
+ * If the power well sleeps during the reset, the reset
+ * request may be dropped and never completes (causing -EIO).
+ */
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ for (retry = 0; retry < 3; retry++) {
+ /*
+		 * We stop the engines, otherwise we might get a failed reset
+		 * and a dead gpu (on elk). Even a gpu as modern as kbl can
+		 * suffer a system hang if a batchbuffer is progressing when
+		 * the reset is issued, regardless of the READY_TO_RESET ack.
+		 * Thus we assume it is best to stop the engines on all gens
+		 * where we have a gpu reset.
+ *
+ * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+ *
+ * WaMediaResetMainRingCleanup:ctg,elk (presumably)
+ *
+ * FIXME: Wa for more modern gens needs to be validated
+ */
+ i915_stop_engines(i915, engine_mask);
+
+ ret = -ENODEV;
+ if (reset) {
+ GEM_TRACE("engine_mask=%x\n", engine_mask);
+ ret = reset(i915, engine_mask, retry);
+ }
+ if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
+ break;
+
+ cond_resched();
+ }
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+
+ return ret;
+}
+
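intel_gpu_reset() is used in both modes described in the comment above: a full-device reset from sleeping context and a per-engine reset that must be safe in atomic context. The two call patterns used later in this file look roughly like:

	/* Full-device reset (may sleep, retried by i915_reset()): */
	ret = intel_gpu_reset(i915, ALL_ENGINES);

	/* Single-engine reset, e.g. from i915_reset_engine(): */
	ret = intel_gpu_reset(i915, intel_engine_flag(engine));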
+bool intel_has_gpu_reset(struct drm_i915_private *i915)
+{
+ return intel_get_gpu_reset(i915);
+}
+
+bool intel_has_reset_engine(struct drm_i915_private *i915)
+{
+ return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2;
+}
+
+int intel_reset_guc(struct drm_i915_private *i915)
+{
+ u32 guc_domain =
+ INTEL_GEN(i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
+ int ret;
+
+ GEM_BUG_ON(!HAS_GUC(i915));
+
+ intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ ret = gen6_hw_domain_reset(i915, guc_domain);
+ intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+
+ return ret;
+}
+
+/*
+ * Ensure the irq handler finishes, and is not run again.
+ * Also return the active request so that we only search for it once.
+ */
+static struct i915_request *
+reset_prepare_engine(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ /*
+ * During the reset sequence, we must prevent the engine from
+ * entering RC6. As the context state is undefined until we restart
+ * the engine, if it does enter RC6 during the reset, the state
+ * written to the powercontext is undefined and so we may lose
+ * GPU state upon resume, i.e. fail to restart after a reset.
+ */
+ intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+
+ rq = engine->reset.prepare(engine);
+ if (rq && rq->fence.error == -EIO)
+ rq = ERR_PTR(-EIO); /* Previous reset failed! */
+
+ return rq;
+}
+
+static int reset_prepare(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ struct i915_request *rq;
+ enum intel_engine_id id;
+ int err = 0;
+
+ for_each_engine(engine, i915, id) {
+ rq = reset_prepare_engine(engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ continue;
+ }
+
+ engine->hangcheck.active_request = rq;
+ }
+
+ i915_gem_revoke_fences(i915);
+ intel_uc_sanitize(i915);
+
+ return err;
+}
+
+/* Returns the request if it was guilty of the hang */
+static struct i915_request *
+reset_request(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ bool stalled)
+{
+ /*
+ * The guilty request will get skipped on a hung engine.
+ *
+ * Users of client default contexts do not rely on logical
+ * state preserved between batches so it is safe to execute
+ * queued requests following the hang. Non default contexts
+ * rely on preserved state, so skipping a batch loses the
+ * evolution of the state and it needs to be considered corrupted.
+ * Executing more queued batches on top of corrupted state is
+	 * risky. We take that risk anyway, and try to advance through
+	 * the queued requests in order to make client behaviour
+	 * more predictable around resets, by not throwing away a random
+	 * number of batches it has prepared for execution. Sophisticated
+	 * clients can use gem_reset_stats_ioctl and dma fence status
+	 * (exported via the sync_file info ioctl on explicit fences) to observe
+	 * when they lose the context state and should rebuild accordingly.
+ *
+	 * The context ban, and ultimately the client ban, mechanisms are safety
+	 * valves in case client submission ends up resulting in nothing more than
+	 * subsequent hangs.
+ */
+
+ if (i915_request_completed(rq)) {
+ GEM_TRACE("%s pardoned global=%d (fence %llx:%lld), current %d\n",
+ engine->name, rq->global_seqno,
+ rq->fence.context, rq->fence.seqno,
+ intel_engine_get_seqno(engine));
+ stalled = false;
+ }
+
+ if (stalled) {
+ context_mark_guilty(rq->gem_context);
+ i915_request_skip(rq, -EIO);
+
+ /* If this context is now banned, skip all pending requests. */
+ if (i915_gem_context_is_banned(rq->gem_context))
+ engine_skip_context(rq);
+ } else {
+ /*
+ * Since this is not the hung engine, it may have advanced
+ * since the hang declaration. Double check by refinding
+ * the active request at the time of the reset.
+ */
+ rq = i915_gem_find_active_request(engine);
+ if (rq) {
+ unsigned long flags;
+
+ context_mark_innocent(rq->gem_context);
+ dma_fence_set_error(&rq->fence, -EAGAIN);
+
+ /* Rewind the engine to replay the incomplete rq */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ rq = list_prev_entry(rq, link);
+ if (&rq->link == &engine->timeline.requests)
+ rq = NULL;
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+ }
+ }
+
+ return rq;
+}
+
+static void reset_engine(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ bool stalled)
+{
+ if (rq)
+ rq = reset_request(engine, rq, stalled);
+
+ /* Setup the CS to resume from the breadcrumb of the hung request */
+ engine->reset.reset(engine, rq);
+}
+
+static void gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ i915_retire_requests(i915);
+
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
+
+ reset_engine(engine,
+ engine->hangcheck.active_request,
+ stalled_mask & ENGINE_MASK(id));
+ ce = fetch_and_zero(&engine->last_retired_context);
+ if (ce)
+ intel_context_unpin(ce);
+
+ /*
+		 * Ostensibly, we always want a context loaded for powersaving,
+ * so if the engine is idle after the reset, send a request
+ * to load our scratch kernel_context.
+ *
+ * More mysteriously, if we leave the engine idle after a reset,
+ * the next userspace batch may hang, with what appears to be
+ * an incoherent read by the CS (presumably stale TLB). An
+ * empty request appears sufficient to paper over the glitch.
+ */
+ if (intel_engine_is_idle(engine)) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (!IS_ERR(rq))
+ i915_request_add(rq);
+ }
+ }
+
+ i915_gem_restore_fences(i915);
+}
+
+static void reset_finish_engine(struct intel_engine_cs *engine)
+{
+ engine->reset.finish(engine);
+
+ intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+}
+
+static void reset_finish(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id) {
+ engine->hangcheck.active_request = NULL;
+ reset_finish_engine(engine);
+ }
+}
+
+static void nop_submit_request(struct i915_request *request)
+{
+ unsigned long flags;
+
+ GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
+ request->engine->name,
+ request->fence.context, request->fence.seqno);
+ dma_fence_set_error(&request->fence, -EIO);
+
+ spin_lock_irqsave(&request->engine->timeline.lock, flags);
+ __i915_request_submit(request);
+ intel_engine_write_global_seqno(request->engine, request->global_seqno);
+ spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
+}
+
+void i915_gem_set_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ mutex_lock(&error->wedge_mutex);
+ if (test_bit(I915_WEDGED, &error->flags)) {
+ mutex_unlock(&error->wedge_mutex);
+ return;
+ }
+
+ if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
+ struct drm_printer p = drm_debug_printer(__func__);
+
+ for_each_engine(engine, i915, id)
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+
+ GEM_TRACE("start\n");
+
+ /*
+ * First, stop submission to hw, but do not yet complete requests by
+ * rolling the global seqno forward (since this would complete requests
+ * for which we haven't set the fence error to EIO yet).
+ */
+ for_each_engine(engine, i915, id)
+ reset_prepare_engine(engine);
+
+ /* Even if the GPU reset fails, it should still stop the engines */
+ if (INTEL_GEN(i915) >= 5)
+ intel_gpu_reset(i915, ALL_ENGINES);
+
+ for_each_engine(engine, i915, id) {
+ engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
+ }
+ i915->caps.scheduler = 0;
+
+ /*
+ * Make sure no request can slip through without getting completed by
+ * either this call here to intel_engine_write_global_seqno, or the one
+ * in nop_submit_request.
+ */
+ synchronize_rcu();
+
+ /* Mark all executing requests as skipped */
+ for_each_engine(engine, i915, id)
+ engine->cancel_requests(engine);
+
+ for_each_engine(engine, i915, id) {
+ reset_finish_engine(engine);
+ intel_engine_wakeup(engine);
+ }
+
+ smp_mb__before_atomic();
+ set_bit(I915_WEDGED, &error->flags);
+
+ GEM_TRACE("end\n");
+ mutex_unlock(&error->wedge_mutex);
+
+ wake_up_all(&error->reset_queue);
+}
+
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ struct i915_timeline *tl;
+ bool ret = false;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ if (!test_bit(I915_WEDGED, &error->flags))
+ return true;
+
+	if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
+ return false;
+
+ mutex_lock(&error->wedge_mutex);
+
+ GEM_TRACE("start\n");
+
+ /*
+ * Before unwedging, make sure that all pending operations
+ * are flushed and errored out - we may have requests waiting upon
+	 * third party fences. We marked all inflight requests as EIO, and
+	 * every execbuf since then has returned EIO; for consistency we want all
+	 * the currently pending requests to also be marked as EIO, which
+	 * is done inside our nop_submit_request - and so we must wait.
+ *
+ * No more can be submitted until we reset the wedged bit.
+ */
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ struct i915_request *rq;
+
+ rq = i915_gem_active_peek(&tl->last_request,
+ &i915->drm.struct_mutex);
+ if (!rq)
+ continue;
+
+ /*
+ * We can't use our normal waiter as we want to
+ * avoid recursively trying to handle the current
+ * reset. The basic dma_fence_default_wait() installs
+ * a callback for dma_fence_signal(), which is
+ * triggered by our nop handler (indirectly, the
+ * callback enables the signaler thread which is
+ * woken by the nop_submit_request() advancing the seqno
+ * and when the seqno passes the fence, the signaler
+ * then signals the fence waking us up).
+ */
+ if (dma_fence_default_wait(&rq->fence, true,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ goto unlock;
+ }
+ i915_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
+
+ intel_engines_sanitize(i915, false);
+
+ /*
+ * Undo nop_submit_request. We prevent all new i915 requests from
+ * being queued (by disallowing execbuf whilst wedged) so having
+ * waited for all active requests above, we know the system is idle
+ * and do not have to worry about a thread being inside
+ * engine->submit_request() as we swap over. So unlike installing
+ * the nop_submit_request on reset, we can do this from normal
+ * context and do not require stop_machine().
+ */
+ intel_engines_reset_default_submission(i915);
+ i915_gem_contexts_lost(i915);
+
+ GEM_TRACE("end\n");
+
+ smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
+ clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+ ret = true;
+unlock:
+ mutex_unlock(&i915->gpu_error.wedge_mutex);
+
+ return ret;
+}
+
+/**
+ * i915_reset - reset chip after a hang
+ * @i915: #drm_i915_private to reset
+ * @stalled_mask: mask of the stalled engines with the guilty requests
+ * @reason: user error message for why we are resetting
+ *
+ * Reset the chip. Useful if a hang is detected. Marks the device as wedged
+ * on failure.
+ *
+ * Caller must hold the struct_mutex.
+ *
+ * Procedure is fairly simple:
+ * - reset the chip using the reset reg
+ * - re-init context state
+ * - re-init hardware status page
+ * - re-init ring buffer
+ * - re-init interrupt state
+ * - re-init display
+ */
+void i915_reset(struct drm_i915_private *i915,
+ unsigned int stalled_mask,
+ const char *reason)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ int ret;
+ int i;
+
+ GEM_TRACE("flags=%lx\n", error->flags);
+
+ might_sleep();
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ assert_rpm_wakelock_held(i915);
+ GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
+
+ if (!test_bit(I915_RESET_HANDOFF, &error->flags))
+ return;
+
+ /* Clear any previous failed attempts at recovery. Time to try again. */
+ if (!i915_gem_unset_wedged(i915))
+ goto wakeup;
+
+ if (reason)
+ dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
+ error->reset_count++;
+
+ ret = reset_prepare(i915);
+ if (ret) {
+ dev_err(i915->drm.dev, "GPU recovery failed\n");
+ goto taint;
+ }
+
+ if (!intel_has_gpu_reset(i915)) {
+ if (i915_modparams.reset)
+ dev_err(i915->drm.dev, "GPU reset not supported\n");
+ else
+ DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ goto error;
+ }
+
+ for (i = 0; i < 3; i++) {
+ ret = intel_gpu_reset(i915, ALL_ENGINES);
+ if (ret == 0)
+ break;
+
+ msleep(100);
+ }
+ if (ret) {
+ dev_err(i915->drm.dev, "Failed to reset chip\n");
+ goto taint;
+ }
+
+ /* Ok, now get things going again... */
+
+ /*
+ * Everything depends on having the GTT running, so we need to start
+ * there.
+ */
+ ret = i915_ggtt_enable_hw(i915);
+ if (ret) {
+ DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n",
+ ret);
+ goto error;
+ }
+
+ gt_reset(i915, stalled_mask);
+ intel_overlay_reset(i915);
+
+ /*
+ * Next we need to restore the context, but we don't use those
+ * yet either...
+ *
+ * Ring buffer needs to be re-initialized in the KMS case, or if X
+ * was running at the time of the reset (i.e. we weren't VT
+ * switched away).
+ */
+ ret = i915_gem_init_hw(i915);
+ if (ret) {
+ DRM_ERROR("Failed to initialise HW following reset (%d)\n",
+ ret);
+ goto error;
+ }
+
+ i915_queue_hangcheck(i915);
+
+finish:
+ reset_finish(i915);
+wakeup:
+ clear_bit(I915_RESET_HANDOFF, &error->flags);
+ wake_up_bit(&error->flags, I915_RESET_HANDOFF);
+ return;
+
+taint:
+ /*
+ * History tells us that if we cannot reset the GPU now, we
+ * never will. This then impacts everything that is run
+ * subsequently. On failing the reset, we mark the driver
+ * as wedged, preventing further execution on the GPU.
+ * We also want to go one step further and add a taint to the
+ * kernel so that any subsequent faults can be traced back to
+ * this failure. This is important for CI, where if the
+ * GPU/driver fails we would like to reboot and restart testing
+ * rather than continue on into oblivion. For everyone else,
+ * the system should still plod along, but they have been warned!
+ */
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+error:
+ i915_gem_set_wedged(i915);
+ i915_retire_requests(i915);
+ goto finish;
+}
+
+static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
+ struct intel_engine_cs *engine)
+{
+ return intel_gpu_reset(i915, intel_engine_flag(engine));
+}
+
+/**
+ * i915_reset_engine - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no dev_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identify the request that caused the hang and drop it
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
+{
+ struct i915_gpu_error *error = &engine->i915->gpu_error;
+ struct i915_request *active_request;
+ int ret;
+
+ GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
+ GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
+
+ active_request = reset_prepare_engine(engine);
+ if (IS_ERR_OR_NULL(active_request)) {
+ /* Either the previous reset failed, or we pardon the reset. */
+ ret = PTR_ERR(active_request);
+ goto out;
+ }
+
+ if (msg)
+ dev_notice(engine->i915->drm.dev,
+ "Resetting %s for %s\n", engine->name, msg);
+ error->reset_engine_count[engine->id]++;
+
+ if (!engine->i915->guc.execbuf_client)
+ ret = intel_gt_reset_engine(engine->i915, engine);
+ else
+ ret = intel_guc_reset_engine(&engine->i915->guc, engine);
+ if (ret) {
+ /* If we fail here, we expect to fallback to a global reset */
+ DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
+ engine->i915->guc.execbuf_client ? "GuC " : "",
+ engine->name, ret);
+ goto out;
+ }
+
+ /*
+ * The request that caused the hang is stuck on elsp; we know the
+ * active request and can drop it. Adjust the head to skip the offending
+ * request and resume executing the remaining requests in the queue.
+ */
+ reset_engine(engine, active_request, true);
+
+ /*
+ * The engine and its registers (and workarounds in case of render)
+ * have been reset to their default values. Follow the init_ring
+ * process to program RING_MODE, HWSP and re-enable submission.
+ */
+ ret = engine->init_hw(engine);
+ if (ret)
+ goto out;
+
+out:
+ intel_engine_cancel_stop_cs(engine);
+ reset_finish_engine(engine);
+ return ret;
+}
+
+static void i915_reset_device(struct drm_i915_private *i915,
+ u32 engine_mask,
+ const char *reason)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ struct kobject *kobj = &i915->drm.primary->kdev->kobj;
+ char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+ char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+ char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
+ struct i915_wedge_me w;
+
+ kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
+
+ DRM_DEBUG_DRIVER("resetting chip\n");
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
+
+ /* Use a watchdog to ensure that our reset completes */
+ i915_wedge_on_timeout(&w, i915, 5 * HZ) {
+ intel_prepare_reset(i915);
+
+ error->reason = reason;
+ error->stalled_mask = engine_mask;
+
+ /* Signal that locked waiters should reset the GPU */
+ smp_mb__before_atomic();
+ set_bit(I915_RESET_HANDOFF, &error->flags);
+ wake_up_all(&error->wait_queue);
+
+ /*
+ * Wait for anyone holding the lock to wakeup, without
+ * blocking indefinitely on struct_mutex.
+ */
+ do {
+ if (mutex_trylock(&i915->drm.struct_mutex)) {
+ i915_reset(i915, engine_mask, reason);
+ mutex_unlock(&i915->drm.struct_mutex);
+ }
+ } while (wait_on_bit_timeout(&error->flags,
+ I915_RESET_HANDOFF,
+ TASK_UNINTERRUPTIBLE,
+ 1));
+
+ error->stalled_mask = 0;
+ error->reason = NULL;
+
+ intel_finish_reset(i915);
+ }
+
+ if (!test_bit(I915_WEDGED, &error->flags))
+ kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
+}
+
+void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+{
+ u32 eir;
+
+ if (!IS_GEN(dev_priv, 2))
+ I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+
+ if (INTEL_GEN(dev_priv) < 4)
+ I915_WRITE(IPEIR, I915_READ(IPEIR));
+ else
+ I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+
+ I915_WRITE(EIR, I915_READ(EIR));
+ eir = I915_READ(EIR);
+ if (eir) {
+ /*
+ * Some errors might have become stuck;
+ * mask them.
+ */
+ DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
+ I915_WRITE(EMR, I915_READ(EMR) | eir);
+ I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ I915_WRITE(GEN8_RING_FAULT_REG,
+ I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
+ POSTING_READ(GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(dev_priv) >= 6) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ I915_WRITE(RING_FAULT_REG(engine),
+ I915_READ(RING_FAULT_REG(engine)) &
+ ~RING_FAULT_VALID);
+ }
+ POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
+ }
+}
+
+/**
+ * i915_handle_error - handle a gpu error
+ * @i915: i915 device private
+ * @engine_mask: mask representing engines that are hung
+ * @flags: control flags
+ * @fmt: Error message format string
+ *
+ * Do some basic checking of register state at error time and
+ * dump it to the syslog. Also call i915_capture_error_state() to make
+ * sure we get a record and make it available in debugfs. Fire a uevent
+ * so userspace knows something bad happened (should trigger collection
+ * of a ring dump etc.).
+ */
+void i915_handle_error(struct drm_i915_private *i915,
+ u32 engine_mask,
+ unsigned long flags,
+ const char *fmt, ...)
+{
+ struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
+ unsigned int tmp;
+ char error_msg[80];
+ char *msg = NULL;
+
+ if (fmt) {
+ va_list args;
+
+ va_start(args, fmt);
+ vscnprintf(error_msg, sizeof(error_msg), fmt, args);
+ va_end(args);
+
+ msg = error_msg;
+ }
+
+ /*
+ * In most cases it's guaranteed that we get here with an RPM
+ * reference held, for example because there is a pending GPU
+ * request that won't finish until the reset is done. This
+ * isn't the case at least when we get here by doing a
+ * simulated reset via debugfs, so get an RPM reference.
+ */
+ wakeref = intel_runtime_pm_get(i915);
+
+ engine_mask &= INTEL_INFO(i915)->ring_mask;
+
+ if (flags & I915_ERROR_CAPTURE) {
+ i915_capture_error_state(i915, engine_mask, msg);
+ i915_clear_error_registers(i915);
+ }
+
+ /*
+ * Try an engine reset when available. We fall back to a full reset
+ * if the engine reset fails.
+ */
+ if (intel_has_reset_engine(i915) &&
+ !i915_terminally_wedged(&i915->gpu_error)) {
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags))
+ continue;
+
+ if (i915_reset_engine(engine, msg) == 0)
+ engine_mask &= ~intel_engine_flag(engine);
+
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags);
+ wake_up_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id);
+ }
+ }
+
+ if (!engine_mask)
+ goto out;
+
+ /* A full reset needs the mutex, so stop any other user from attempting one. */
+ if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &i915->gpu_error.flags));
+ goto out;
+ }
+
+ /* Prevent any other reset-engine attempt. */
+ for_each_engine(engine, i915, tmp) {
+ while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags))
+ wait_on_bit(&i915->gpu_error.flags,
+ I915_RESET_ENGINE + engine->id,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ i915_reset_device(i915, engine_mask, msg);
+
+ for_each_engine(engine, i915, tmp) {
+ clear_bit(I915_RESET_ENGINE + engine->id,
+ &i915->gpu_error.flags);
+ }
+
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ wake_up_all(&i915->gpu_error.reset_queue);
+
+out:
+ intel_runtime_pm_put(i915, wakeref);
+}
+
+static void i915_wedge_me(struct work_struct *work)
+{
+ struct i915_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+ dev_err(w->i915->drm.dev,
+ "%s timed out, cancelling all in-flight rendering.\n",
+ w->name);
+ i915_gem_set_wedged(w->i915);
+}
+
+void __i915_init_wedge(struct i915_wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name)
+{
+ w->i915 = i915;
+ w->name = name;
+
+ INIT_DELAYED_WORK_ONSTACK(&w->work, i915_wedge_me);
+ schedule_delayed_work(&w->work, timeout);
+}
+
+void __i915_fini_wedge(struct i915_wedge_me *w)
+{
+ cancel_delayed_work_sync(&w->work);
+ destroy_delayed_work_on_stack(&w->work);
+ w->i915 = NULL;
+}
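
For reference, the i915_wedge_me helpers above implement an on-stack watchdog around a potentially-stuck operation. A minimal usage sketch, mirroring i915_reset_device() above (the reset_work() call is hypothetical and stands in for whatever work must complete before the timeout):

    struct i915_wedge_me w;

    /* If the block does not complete within 5 seconds, i915_wedge_me()
     * fires from a worker and declares the GPU wedged.
     */
    i915_wedge_on_timeout(&w, i915, 5 * HZ) {
        reset_work(i915); /* hypothetical: the operation being guarded */
    }
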
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
new file mode 100644
index 000000000000..b6a519bde67d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2008-2018 Intel Corporation
+ */
+
+#ifndef I915_RESET_H
+#define I915_RESET_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_engine_cs;
+struct intel_guc;
+
+__printf(4, 5)
+void i915_handle_error(struct drm_i915_private *i915,
+ u32 engine_mask,
+ unsigned long flags,
+ const char *fmt, ...);
+#define I915_ERROR_CAPTURE BIT(0)
+
+void i915_clear_error_registers(struct drm_i915_private *i915);
+
+void i915_reset(struct drm_i915_private *i915,
+ unsigned int stalled_mask,
+ const char *reason);
+int i915_reset_engine(struct intel_engine_cs *engine,
+ const char *reason);
+
+bool intel_has_gpu_reset(struct drm_i915_private *i915);
+bool intel_has_reset_engine(struct drm_i915_private *i915);
+
+int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+
+int intel_reset_guc(struct drm_i915_private *i915);
+
+struct i915_wedge_me {
+ struct delayed_work work;
+ struct drm_i915_private *i915;
+ const char *name;
+};
+
+void __i915_init_wedge(struct i915_wedge_me *w,
+ struct drm_i915_private *i915,
+ long timeout,
+ const char *name);
+void __i915_fini_wedge(struct i915_wedge_me *w);
+
+#define i915_wedge_on_timeout(W, DEV, TIMEOUT) \
+ for (__i915_init_wedge((W), (DEV), (TIMEOUT), __func__); \
+ (W)->i915; \
+ __i915_fini_wedge((W)))
+
+#endif /* I915_RESET_H */
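
i915_handle_error() is the entry point exported by this header for reporting a hang. A sketch of a typical call, as a hang detector might issue it (the engine mask and message below are illustrative, not taken from this patch):

    /* Report a hang on a single engine, request an error-state capture,
     * and let the driver choose between a per-engine and a full reset.
     */
    i915_handle_error(i915, intel_engine_flag(engine), I915_ERROR_CAPTURE,
                      "hang on %s", engine->name);
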
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8f3aa4dc0c98..f18afa2bac8d 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,7 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
@@ -65,7 +64,7 @@ int i915_save_state(struct drm_i915_private *dev_priv)
i915_save_display(dev_priv);
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
pci_read_config_word(pdev, GCDGMBUS,
&dev_priv->regfile.saveGCDGMBUS);
@@ -77,14 +76,14 @@ int i915_save_state(struct drm_i915_private *dev_priv)
dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
- if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i));
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
}
for (i = 0; i < 3; i++)
dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i));
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i));
} else if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -108,7 +107,7 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
pci_write_config_word(pdev, GCDGMBUS,
dev_priv->regfile.saveGCDGMBUS);
i915_restore_display(dev_priv);
@@ -122,14 +121,14 @@ int i915_restore_state(struct drm_i915_private *dev_priv)
I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
/* Scratch space */
- if (IS_GEN2(dev_priv) && IS_MOBILE(dev_priv)) {
+ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) {
for (i = 0; i < 7; i++) {
I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]);
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]);
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
for (i = 0; i < 7; i++)
I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]);
} else if (HAS_GMCH_DISPLAY(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 535caebd9813..41313005af42 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,11 +42,11 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- u64 res;
+ intel_wakeref_t wakeref;
+ u64 res = 0;
- intel_runtime_pm_get(dev_priv);
- res = intel_rc6_residency_us(dev_priv, reg);
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ res = intel_rc6_residency_us(dev_priv, reg);
return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
@@ -258,9 +258,10 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
struct device_attribute *attr, char *buf)
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+ intel_wakeref_t wakeref;
int ret;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -274,7 +275,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
}
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}
@@ -354,6 +355,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ intel_wakeref_t wakeref;
u32 val;
ssize_t ret;
@@ -361,7 +363,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
@@ -371,7 +373,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
val > rps->max_freq ||
val < rps->min_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return -EINVAL;
}
@@ -392,7 +394,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
}
@@ -412,6 +414,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
{
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
struct intel_rps *rps = &dev_priv->gt_pm.rps;
+ intel_wakeref_t wakeref;
u32 val;
ssize_t ret;
@@ -419,7 +422,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
if (ret)
return ret;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->pcu_lock);
@@ -429,7 +432,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
val > rps->max_freq ||
val > rps->max_freq_softlimit) {
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return -EINVAL;
}
@@ -446,7 +449,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
mutex_unlock(&dev_priv->pcu_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return ret ?: count;
}
@@ -521,7 +524,9 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
ssize_t ret;
gpu = i915_first_error_state(i915);
- if (gpu) {
+ if (IS_ERR(gpu)) {
+ ret = PTR_ERR(gpu);
+ } else if (gpu) {
ret = i915_gpu_state_copy_to_buffer(gpu, buf, off, count);
i915_gpu_state_put(gpu);
} else {
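
The sysfs conversions above all follow the same wakeref-tracked runtime-PM pattern. A minimal sketch of the two forms used in these hunks (assuming the helpers behave as shown above):

    intel_wakeref_t wakeref;
    u64 res = 0;

    /* Scoped form: the wakeref is held only for the duration of the block */
    with_intel_runtime_pm(dev_priv, wakeref)
        res = intel_rc6_residency_us(dev_priv, reg);

    /* Explicit form: the cookie returned by get() must be passed back to put() */
    wakeref = intel_runtime_pm_get(dev_priv);
    /* ... access the hardware ... */
    intel_runtime_pm_put(dev_priv, wakeref);
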
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index ebd71b487220..38c1e15e927a 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -63,14 +63,6 @@ struct i915_timeline {
* redundant and we can discard it without loss of generality.
*/
struct i915_syncmap *sync;
- /**
- * Separately to the inter-context seqno map above, we track the last
- * barrier (e.g. semaphore wait) to the global engine timelines. Note
- * that this tracks global_seqno rather than the context.seqno, and
- * so it is subject to the limitations of hw wraparound and that we
- * may need to revoke global_seqno (on pre-emption).
- */
- u32 global_sync[I915_NUM_ENGINES];
struct list_head link;
const char *name;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index b50c6b829715..33d90eca9cdd 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,7 +6,6 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -585,35 +584,6 @@ TRACE_EVENT(i915_gem_evict_vm,
TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
);
-TRACE_EVENT(i915_gem_ring_sync_to,
- TP_PROTO(struct i915_request *to, struct i915_request *from),
- TP_ARGS(to, from),
-
- TP_STRUCT__entry(
- __field(u32, dev)
- __field(u32, from_class)
- __field(u32, from_instance)
- __field(u32, to_class)
- __field(u32, to_instance)
- __field(u32, seqno)
- ),
-
- TP_fast_assign(
- __entry->dev = from->i915->drm.primary->index;
- __entry->from_class = from->engine->uabi_class;
- __entry->from_instance = from->engine->instance;
- __entry->to_class = to->engine->uabi_class;
- __entry->to_instance = to->engine->instance;
- __entry->seqno = from->global_seqno;
- ),
-
- TP_printk("dev=%u, sync-from=%u:%u, sync-to=%u:%u, seqno=%u",
- __entry->dev,
- __entry->from_class, __entry->from_instance,
- __entry->to_class, __entry->to_instance,
- __entry->seqno)
-);
-
TRACE_EVENT(i915_request_queue,
TP_PROTO(struct i915_request *rq, u32 flags),
TP_ARGS(rq, flags),
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index d9dcee4ec51f..355b48d1c937 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -337,9 +337,11 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
}
for_each_dsi_port(port, intel_dsi->ports) {
- intel_display_power_get(dev_priv, port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO);
+ intel_dsi->io_wakeref[port] =
+ intel_display_power_get(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO);
}
}
@@ -1125,10 +1127,18 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
enum port port;
u32 tmp;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);
-
- if (intel_dsi->dual_link)
- intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_B_IO);
+ for_each_dsi_port(port, intel_dsi->ports) {
+ intel_wakeref_t wakeref;
+
+ wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
+ if (wakeref) {
+ intel_display_power_put(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ wakeref);
+ }
+ }
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1229,13 +1239,15 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u32 tmp;
- enum port port;
enum transcoder dsi_trans;
+ intel_wakeref_t wakeref;
+ enum port port;
bool ret = false;
+ u32 tmp;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
for_each_dsi_port(port, intel_dsi->ports) {
@@ -1260,7 +1272,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
ret = tmp & PIPECONF_ENABLE;
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
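
The DSI I/O power changes above show the corresponding pattern for display power domains: the wakeref returned on enable is stored, and the disable path consumes it exactly once. A condensed sketch of that flow (single port, domain name as in the hunk):

    intel_wakeref_t wakeref;

    /* Enable: remember the wakeref for the domain we just took */
    intel_dsi->io_wakeref[port] =
        intel_display_power_get(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO);

    /* Disable: fetch and clear the stored wakeref, then release the domain */
    wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
    if (wakeref)
        intel_display_power_put(dev_priv, POWER_DOMAIN_PORT_DDI_A_IO, wakeref);
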
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 6ba478e57b9b..9d142d038a7d 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -6,7 +6,6 @@
*/
#include <linux/pci.h>
#include <linux/acpi.h>
-#include <drm/drmP.h>
#include "i915_drv.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 8cb02f28d30c..16263add3cdd 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -29,7 +29,6 @@
* See intel_atomic_plane.c for the plane-specific atomic functionality.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
@@ -47,7 +46,7 @@
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -79,7 +78,7 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -233,7 +232,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (plane_state && plane_state->base.fb &&
plane_state->base.fb->format->is_yuv &&
plane_state->base.fb->format->num_planes > 1) {
- if (IS_GEN9(dev_priv) &&
+ if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
} else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 0a73e6e65c20..9a2fdc77ebcb 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -31,7 +31,6 @@
* prepare/check/commit/cleanup steps.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
@@ -312,7 +311,7 @@ int
intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
@@ -335,7 +334,7 @@ int
intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
DRM_DEBUG_KMS("Unknown property [PROP:%d:%s]\n",
property->base.id, property->name);
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index ae55a6865d5c..de26cd0a5497 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -27,7 +27,6 @@
#include <drm/intel_lpe_audio.h>
#include "intel_drv.h"
-#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "i915_drv.h"
@@ -749,7 +748,8 @@ static void i915_audio_component_get_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev)
{
- intel_display_power_put(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+ intel_display_power_put_unchecked(kdev_to_i915(kdev),
+ POWER_DOMAIN_AUDIO);
}
static void i915_audio_component_codec_wake_override(struct device *kdev,
@@ -758,7 +758,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
- if (!IS_GEN9(dev_priv))
+ if (!IS_GEN(dev_priv, 9))
return;
i915_audio_component_get_power(kdev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 6d3e0260d49c..561a4f9f044c 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -453,7 +452,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
* Only parse SDVO mappings on gens that could have SDVO. This isn't
* accurate and doesn't have to be, as long as it's not too strict.
*/
- if (!IS_GEN(dev_priv, 3, 7)) {
+ if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
return;
}
@@ -1386,8 +1385,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
info->supports_dp = is_dp;
info->supports_edp = is_edp;
- DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d\n",
- port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt);
+ if (bdb_version >= 195)
+ info->supports_typec_usb = child->dp_usb_type_c;
+
+ if (bdb_version >= 209)
+ info->supports_tbt = child->tbt;
+
+ DRM_DEBUG_KMS("Port %c VBT info: DP:%d HDMI:%d DVI:%d EDP:%d CRT:%d TCUSB:%d TBT:%d\n",
+ port_name(port), is_dp, is_hdmi, is_dvi, is_edp, is_crt,
+ info->supports_typec_usb, info->supports_tbt);
if (is_edp && is_dvi)
DRM_DEBUG_KMS("Internal DP port %c is TMDS compatible\n",
@@ -1940,6 +1946,15 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
};
int i;
+ if (HAS_DDI(dev_priv)) {
+ const struct ddi_vbt_port_info *port_info =
+ &dev_priv->vbt.ddi_port_info[port];
+
+ return port_info->supports_dp ||
+ port_info->supports_dvi ||
+ port_info->supports_hdmi;
+ }
+
/* FIXME maybe deal with port A as well? */
if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 447c5256f63a..b58915b8708b 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -158,36 +158,24 @@ static void intel_breadcrumbs_fake_irq(struct timer_list *t)
static void irq_enable(struct intel_engine_cs *engine)
{
- /*
- * FIXME: Ideally we want this on the API boundary, but for the
- * sake of testing with mock breadcrumbs (no HW so unable to
- * enable irqs) we place it deep within the bowels, at the point
- * of no return.
- */
- GEM_BUG_ON(!intel_irqs_enabled(engine->i915));
-
- /* Enabling the IRQ may miss the generation of the interrupt, but
- * we still need to force the barrier before reading the seqno,
- * just in case.
- */
- set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+ if (!engine->irq_enable)
+ return;
/* Caller disables interrupts */
- if (engine->irq_enable) {
- spin_lock(&engine->i915->irq_lock);
- engine->irq_enable(engine);
- spin_unlock(&engine->i915->irq_lock);
- }
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
+ if (!engine->irq_disable)
+ return;
+
/* Caller disables interrupts */
- if (engine->irq_disable) {
- spin_lock(&engine->i915->irq_lock);
- engine->irq_disable(engine);
- spin_unlock(&engine->i915->irq_lock);
- }
+ spin_lock(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock(&engine->i915->irq_lock);
}
void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
@@ -299,25 +287,16 @@ static bool __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
if (b->irq_armed)
return false;
- /* The breadcrumb irq will be disarmed on the interrupt after the
+ /*
+ * The breadcrumb irq will be disarmed on the interrupt after the
* waiters are signaled. This gives us a single interrupt window in
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
b->irq_armed = true;
- if (I915_SELFTEST_ONLY(b->mock)) {
- /* For our mock objects we want to avoid interaction
- * with the real hardware (which is not set up). So
- * we simply pretend we have enabled the powerwell
- * and the irq, and leave it up to the mock
- * implementation to call intel_engine_wakeup()
- * itself when it wants to simulate a user interrupt,
- */
- return true;
- }
-
- /* Since we are waiting on a request, the GPU should be busy
+ /*
+ * Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference. This is tracked
* by i915->gt.awake, we can forgo holding our own wakref
* for the interrupt as before i915->gt.awake is released (when
@@ -652,8 +631,7 @@ static int intel_breadcrumbs_signaler(void *arg)
rq->signaling.wait.seqno = 0;
__list_del_entry(&rq->signaling.link);
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &rq->fence.flags)) {
+ if (!i915_request_signaled(rq)) {
list_add_tail(&rq->signaling.link,
&list);
i915_request_get(rq);
@@ -683,16 +661,6 @@ static int intel_breadcrumbs_signaler(void *arg)
}
if (unlikely(do_schedule)) {
- /* Before we sleep, check for a missed seqno */
- if (current->state & TASK_NORMAL &&
- !list_empty(&b->signals) &&
- engine->irq_seqno_barrier &&
- test_and_clear_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)) {
- engine->irq_seqno_barrier(engine);
- intel_engine_wakeup(engine);
- }
-
sleep:
if (kthread_should_park())
kthread_parkme();
@@ -859,16 +827,6 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
else
irq_disable(engine);
- /*
- * We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
- * GPU is active and may have already executed the MI_USER_INTERRUPT
- * before the CPU is ready to receive. However, the engine is currently
- * idle (we haven't started it yet), there is no possibility for a
- * missed interrupt as we enabled the irq and so we can clear the
- * immediate wakeup (until a real interrupt arrives for the waiter).
- */
- clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
-
spin_unlock_irqrestore(&b->irq_lock, flags);
}
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 25e3aba9cded..15ba950dee00 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -218,7 +218,7 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
};
const unsigned int *vco_table;
unsigned int vco;
- uint8_t tmp = 0;
+ u8 tmp = 0;
/* FIXME other chipsets? */
if (IS_GM45(dev_priv))
@@ -249,13 +249,13 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
- static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
- static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
- static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
- const uint8_t *div_table;
+ static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
+ static const u8 div_4000[] = { 14, 12, 10, 8, 6, 20 };
+ static const u8 div_4800[] = { 20, 14, 12, 10, 8, 24 };
+ static const u8 div_5333[] = { 20, 16, 12, 12, 8, 28 };
+ const u8 *div_table;
unsigned int cdclk_sel;
- uint16_t tmp = 0;
+ u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@@ -330,12 +330,12 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
- static const uint8_t div_3200[] = { 16, 10, 8 };
- static const uint8_t div_4000[] = { 20, 12, 10 };
- static const uint8_t div_5333[] = { 24, 16, 14 };
- const uint8_t *div_table;
+ static const u8 div_3200[] = { 16, 10, 8 };
+ static const u8 div_4000[] = { 20, 12, 10 };
+ static const u8 div_5333[] = { 24, 16, 14 };
+ const u8 *div_table;
unsigned int cdclk_sel;
- uint16_t tmp = 0;
+ u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@@ -375,7 +375,7 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
{
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int cdclk_sel;
- uint16_t tmp = 0;
+ u16 tmp = 0;
cdclk_state->vco = intel_hpll_vco(dev_priv);
@@ -403,8 +403,8 @@ static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+ u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_state->cdclk = 800000;
@@ -520,6 +520,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
+ intel_wakeref_t wakeref;
switch (cdclk) {
case 400000:
@@ -539,7 +540,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
* a system suspend. So grab the PIPE-A domain, which covers
* the HW blocks needed for the following programming.
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@@ -593,7 +594,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
@@ -601,6 +602,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
+ intel_wakeref_t wakeref;
switch (cdclk) {
case 333333:
@@ -619,7 +621,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
* a system suspend. So grab the PIPE-A domain, which covers
* the HW blocks needed for the following programming.
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
@@ -637,7 +639,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
vlv_program_pfi_credits(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A, wakeref);
}
static int bdw_calc_cdclk(int min_cdclk)
@@ -670,8 +672,8 @@ static u8 bdw_calc_voltage_level(int cdclk)
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state)
{
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+ u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
cdclk_state->cdclk = 800000;
@@ -698,7 +700,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
const struct intel_cdclk_state *cdclk_state)
{
int cdclk = cdclk_state->cdclk;
- uint32_t val;
+ u32 val;
int ret;
if (WARN((I915_READ(LCPLL_CTL) &
@@ -1081,7 +1083,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
- uint32_t cdctl, expected;
+ u32 cdctl, expected;
/*
* check if the pre-os initialized the display
@@ -2140,7 +2142,7 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
{
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_GEN9(dev_priv) ||
+ else if (IS_GEN(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
else if (IS_CHERRYVIEW(dev_priv))
@@ -2176,7 +2178,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) {
/* Display WA #1145: glk,cnl */
min_cdclk = max(316800, min_cdclk);
- } else if (IS_GEN9(dev_priv) || IS_BROADWELL(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 9) || IS_BROADWELL(dev_priv)) {
/* Display WA #1144: skl,bxt */
min_cdclk = max(432000, min_cdclk);
}
@@ -2537,7 +2539,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 2 * max_cdclk_freq;
- else if (IS_GEN9(dev_priv) ||
+ else if (IS_GEN(dev_priv, 9) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
else if (IS_CHERRYVIEW(dev_priv))
@@ -2688,7 +2690,7 @@ static int vlv_hrawclk(struct drm_i915_private *dev_priv)
static int g4x_hrawclk(struct drm_i915_private *dev_priv)
{
- uint32_t clkcfg;
+ u32 clkcfg;
/* hrawclock is 1/4 the FSB frequency */
clkcfg = I915_READ(CLKCFG);
@@ -2785,9 +2787,9 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.get_cdclk = hsw_get_cdclk;
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
dev_priv->display.get_cdclk = vlv_get_cdclk;
- else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
else if (IS_GM45(dev_priv))
dev_priv->display.get_cdclk = gm45_get_cdclk;
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 5127da286a2b..299eb7858adc 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -74,12 +74,17 @@
#define ILK_CSC_COEFF_1_0 \
((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
-static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
+static bool lut_is_legacy(struct drm_property_blob *lut)
{
- return !state->degamma_lut &&
- !state->ctm &&
- state->gamma_lut &&
- drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
+ return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
+}
+
+static bool crtc_state_is_legacy_gamma(struct intel_crtc_state *crtc_state)
+{
+ return !crtc_state->base.degamma_lut &&
+ !crtc_state->base.ctm &&
+ crtc_state->base.gamma_lut &&
+ lut_is_legacy(crtc_state->base.gamma_lut);
}
/*
@@ -108,10 +113,10 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
{
- int pipe = intel_crtc->pipe;
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
+ int pipe = crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
@@ -132,14 +137,12 @@ static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
}
-static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
+static void ilk_load_csc_matrix(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int i, pipe = intel_crtc->pipe;
- uint16_t coeffs[9] = { 0, };
- struct intel_crtc_state *intel_crtc_state = to_intel_crtc_state(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, pipe = crtc->pipe;
+ u16 coeffs[9] = { 0, };
bool limited_color_range = false;
/*
@@ -147,14 +150,14 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
* do the range compression using the gamma LUT instead.
*/
if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
- limited_color_range = intel_crtc_state->limited_color_range;
+ limited_color_range = crtc_state->limited_color_range;
- if (intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- intel_crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
- ilk_load_ycbcr_conversion_matrix(intel_crtc);
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
+ ilk_load_ycbcr_conversion_matrix(crtc);
return;
- } else if (crtc_state->ctm) {
- struct drm_color_ctm *ctm = crtc_state->ctm->data;
+ } else if (crtc_state->base.ctm) {
+ struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
const u64 *input;
u64 temp[9];
@@ -168,7 +171,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
* hardware.
*/
for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i];
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
* Clamp input value to min/max supported by
@@ -230,7 +233,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
if (INTEL_GEN(dev_priv) > 6) {
- uint16_t postoff = 0;
+ u16 postoff = 0;
if (limited_color_range)
postoff = (16 * (1 << 12) / 255) & 0x1fff;
@@ -241,7 +244,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
} else {
- uint32_t mode = CSC_MODE_YUV_TO_RGB;
+ u32 mode = CSC_MODE_YUV_TO_RGB;
if (limited_color_range)
mode |= CSC_BLACK_SCREEN_OFFSET;
@@ -253,21 +256,20 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
/*
* Set up the pipe CSC unit on CherryView.
*/
-static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
+static void cherryview_load_csc_matrix(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- int pipe = to_intel_crtc(crtc)->pipe;
- uint32_t mode;
+ int pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ u32 mode;
- if (state->ctm) {
- struct drm_color_ctm *ctm = state->ctm->data;
- uint16_t coeffs[9] = { 0, };
+ if (crtc_state->base.ctm) {
+ struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ u16 coeffs[9] = { 0, };
int i;
for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- uint64_t abs_coeff =
+ u64 abs_coeff =
((1ULL << 63) - 1) & ctm->matrix[i];
/* Round coefficient. */
@@ -293,17 +295,17 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
}
- mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy_gamma(state)) {
- mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
- (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
+ mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
+ if (!crtc_state_is_legacy_gamma(crtc_state)) {
+ mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
}
I915_WRITE(CGM_PIPE_MODE(pipe), mode);
}
-void intel_color_set_csc(struct drm_crtc_state *crtc_state)
+void intel_color_set_csc(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc_state->crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
if (dev_priv->display.load_csc_matrix)
@@ -311,14 +313,12 @@ void intel_color_set_csc(struct drm_crtc_state *crtc_state)
}
/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(struct drm_crtc *crtc,
- struct drm_property_blob *blob,
- struct intel_crtc_state *crtc_state)
+static void i9xx_load_luts_internal(struct intel_crtc_state *crtc_state,
+ struct drm_property_blob *blob)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
int i;
if (HAS_GMCH_DISPLAY(dev_priv)) {
@@ -331,7 +331,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
if (blob) {
struct drm_color_lut *lut = blob->data;
for (i = 0; i < 256; i++) {
- uint32_t word =
+ u32 word =
(drm_color_lut_extract(lut[i].red, 8) << 16) |
(drm_color_lut_extract(lut[i].green, 8) << 8) |
drm_color_lut_extract(lut[i].blue, 8);
@@ -343,7 +343,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
} else {
for (i = 0; i < 256; i++) {
- uint32_t word = (i << 16) | (i << 8) | i;
+ u32 word = (i << 16) | (i << 8) | i;
if (HAS_GMCH_DISPLAY(dev_priv))
I915_WRITE(PALETTE(pipe, i), word);
@@ -353,56 +353,51 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
}
-static void i9xx_load_luts(struct drm_crtc_state *crtc_state)
+static void i9xx_load_luts(struct intel_crtc_state *crtc_state)
{
- i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut,
- to_intel_crtc_state(crtc_state));
+ i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
}
/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */
-static void haswell_load_luts(struct drm_crtc_state *crtc_state)
+static void haswell_load_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->crtc;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *intel_crtc_state =
- to_intel_crtc_state(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bool reenable_ips = false;
/*
* Workaround : Do not read or write the pipe palette/gamma data while
* GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
*/
- if (IS_HASWELL(dev_priv) && intel_crtc_state->ips_enabled &&
- (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
- hsw_disable_ips(intel_crtc_state);
+ if (IS_HASWELL(dev_priv) && crtc_state->ips_enabled &&
+ (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) {
+ hsw_disable_ips(crtc_state);
reenable_ips = true;
}
- intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
- I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+ I915_WRITE(GAMMA_MODE(crtc->pipe), GAMMA_MODE_MODE_8BIT);
i9xx_load_luts(crtc_state);
if (reenable_ips)
- hsw_enable_ips(intel_crtc_state);
+ hsw_enable_ips(crtc_state);
}
-static void bdw_load_degamma_lut(struct drm_crtc_state *state)
+static void bdw_load_degamma_lut(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
I915_WRITE(PREC_PAL_INDEX(pipe),
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
- if (state->degamma_lut) {
- struct drm_color_lut *lut = state->degamma_lut->data;
+ if (crtc_state->base.degamma_lut) {
+ struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
for (i = 0; i < lut_size; i++) {
- uint32_t word =
+ u32 word =
drm_color_lut_extract(lut[i].red, 10) << 20 |
drm_color_lut_extract(lut[i].green, 10) << 10 |
drm_color_lut_extract(lut[i].blue, 10);
@@ -411,7 +406,7 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
}
} else {
for (i = 0; i < lut_size; i++) {
- uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+ u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
I915_WRITE(PREC_PAL_DATA(pipe),
(v << 20) | (v << 10) | v);
@@ -419,11 +414,11 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
}
}
-static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+static void bdw_load_gamma_lut(struct intel_crtc_state *crtc_state, u32 offset)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
@@ -432,11 +427,11 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
PAL_PREC_AUTO_INCREMENT |
offset);
- if (state->gamma_lut) {
- struct drm_color_lut *lut = state->gamma_lut->data;
+ if (crtc_state->base.gamma_lut) {
+ struct drm_color_lut *lut = crtc_state->base.gamma_lut->data;
for (i = 0; i < lut_size; i++) {
- uint32_t word =
+ u32 word =
(drm_color_lut_extract(lut[i].red, 10) << 20) |
(drm_color_lut_extract(lut[i].green, 10) << 10) |
drm_color_lut_extract(lut[i].blue, 10);
@@ -454,7 +449,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
drm_color_lut_extract(lut[i].blue, 16));
} else {
for (i = 0; i < lut_size; i++) {
- uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+ u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
I915_WRITE(PREC_PAL_DATA(pipe),
(v << 20) | (v << 10) | v);
@@ -467,22 +462,21 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
}
/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc_state *state)
+static void broadwell_load_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
- if (crtc_state_is_legacy_gamma(state)) {
- haswell_load_luts(state);
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ haswell_load_luts(crtc_state);
return;
}
- bdw_load_degamma_lut(state);
- bdw_load_gamma_lut(state,
+ bdw_load_degamma_lut(crtc_state);
+ bdw_load_gamma_lut(crtc_state,
INTEL_INFO(dev_priv)->color.degamma_lut_size);
- intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
POSTING_READ(GAMMA_MODE(pipe));
@@ -493,12 +487,12 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
-static void glk_load_degamma_lut(struct drm_crtc_state *state)
+static void glk_load_degamma_lut(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
- enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
- const uint32_t lut_size = 33;
- uint32_t i;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
+ const u32 lut_size = 33;
+ u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
@@ -513,7 +507,7 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
* different values per channel, so this just loads a linear table.
*/
for (i = 0; i < lut_size; i++) {
- uint32_t v = (i * (1 << 16)) / (lut_size - 1);
+ u32 v = (i * (1 << 16)) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
@@ -523,49 +517,46 @@ static void glk_load_degamma_lut(struct drm_crtc_state *state)
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
}
-static void glk_load_luts(struct drm_crtc_state *state)
+static void glk_load_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ enum pipe pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
- glk_load_degamma_lut(state);
+ glk_load_degamma_lut(crtc_state);
- if (crtc_state_is_legacy_gamma(state)) {
- haswell_load_luts(state);
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
+ haswell_load_luts(crtc_state);
return;
}
- bdw_load_gamma_lut(state, 0);
+ bdw_load_gamma_lut(crtc_state, 0);
- intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
POSTING_READ(GAMMA_MODE(pipe));
}
/* Loads the palette/gamma unit for the CRTC on CherryView. */
-static void cherryview_load_luts(struct drm_crtc_state *state)
+static void cherryview_load_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum pipe pipe = to_intel_crtc(crtc)->pipe;
struct drm_color_lut *lut;
- uint32_t i, lut_size;
- uint32_t word0, word1;
+ u32 i, lut_size;
+ u32 word0, word1;
- if (crtc_state_is_legacy_gamma(state)) {
+ if (crtc_state_is_legacy_gamma(crtc_state)) {
/* Turn off degamma/gamma on CGM block. */
I915_WRITE(CGM_PIPE_MODE(pipe),
- (state->ctm ? CGM_PIPE_MODE_CSC : 0));
- i9xx_load_luts_internal(crtc, state->gamma_lut,
- to_intel_crtc_state(state));
+ (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0));
+ i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
return;
}
- if (state->degamma_lut) {
- lut = state->degamma_lut->data;
+ if (crtc_state->base.degamma_lut) {
+ lut = crtc_state->base.degamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
@@ -579,8 +570,8 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
}
- if (state->gamma_lut) {
- lut = state->gamma_lut->data;
+ if (crtc_state->base.gamma_lut) {
+ lut = crtc_state->base.gamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
@@ -595,29 +586,28 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
I915_WRITE(CGM_PIPE_MODE(pipe),
- (state->ctm ? CGM_PIPE_MODE_CSC : 0) |
- (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
- (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
+ (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0) |
+ (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
+ (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0));
/*
* Also program a linear LUT in the legacy block (behind the
* CGM block).
*/
- i9xx_load_luts_internal(crtc, NULL, to_intel_crtc_state(state));
+ i9xx_load_luts_internal(crtc_state, NULL);
}
-void intel_color_load_luts(struct drm_crtc_state *crtc_state)
+void intel_color_load_luts(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc_state->crtc->dev;
+ struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
dev_priv->display.load_luts(crtc_state);
}
-int intel_color_check(struct drm_crtc *crtc,
- struct drm_crtc_state *crtc_state)
+int intel_color_check(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
size_t gamma_length, degamma_length;
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
@@ -627,10 +617,10 @@ int intel_color_check(struct drm_crtc *crtc,
* We allow both degamma & gamma luts at the right size or
* NULL.
*/
- if ((!crtc_state->degamma_lut ||
- drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
- (!crtc_state->gamma_lut ||
- drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
+ if ((!crtc_state->base.degamma_lut ||
+ drm_color_lut_size(crtc_state->base.degamma_lut) == degamma_length) &&
+ (!crtc_state->base.gamma_lut ||
+ drm_color_lut_size(crtc_state->base.gamma_lut) == gamma_length))
return 0;
/*
@@ -643,11 +633,11 @@ int intel_color_check(struct drm_crtc *crtc,
return -EINVAL;
}
-void intel_color_init(struct drm_crtc *crtc)
+void intel_color_init(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- drm_mode_crtc_set_gamma_size(crtc, 256);
+ drm_mode_crtc_set_gamma_size(&crtc->base, 256);
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix;
@@ -669,7 +659,7 @@ void intel_color_init(struct drm_crtc *crtc)
/* Enable color management support when we have degamma & gamma LUTs. */
if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
- drm_crtc_enable_color_mgmt(crtc,
+ drm_crtc_enable_color_mgmt(&crtc->base,
INTEL_INFO(dev_priv)->color.degamma_lut_size,
true,
INTEL_INFO(dev_priv)->color.gamma_lut_size);
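For reference, a minimal caller sketch (not part of the patch; example_color_check() is a hypothetical wrapper): with the reworked signature the drm-level state is converted once and the LUT/CTM properties are reached through crtc_state->base.
static int example_color_check(struct drm_crtc_state *state)
{
	struct intel_crtc_state *crtc_state = to_intel_crtc_state(state);
	/* validates degamma/gamma LUT sizes against the platform limits */
	return intel_color_check(crtc_state);
}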
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index 37d2c644f4b8..ee16758747c5 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -27,7 +27,6 @@
#include <linux/i2c.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2e0fd9927db2..c2e799a5e63e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -27,7 +27,6 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -84,15 +83,17 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_crt_port_enabled(dev_priv, crt->adpa_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -322,7 +323,7 @@ intel_crt_mode_valid(struct drm_connector *connector,
* DAC limit supposedly 355 MHz.
*/
max_clock = 270000;
- else if (IS_GEN3(dev_priv) || IS_GEN4(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 3, 4))
max_clock = 400000;
else
max_clock = 350000;
@@ -630,19 +631,19 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
}
static enum drm_connector_status
-intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
+intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t save_bclrpat;
- uint32_t save_vtotal;
- uint32_t vtotal, vactive;
- uint32_t vsample;
- uint32_t vblank, vblank_start, vblank_end;
- uint32_t dsl;
+ u32 save_bclrpat;
+ u32 save_vtotal;
+ u32 vtotal, vactive;
+ u32 vsample;
+ u32 vblank, vblank_start, vblank_end;
+ u32 dsl;
i915_reg_t bclrpat_reg, vtotal_reg,
vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
- uint8_t st00;
+ u8 st00;
enum drm_connector_status status;
DRM_DEBUG_KMS("starting load-detect on CRT\n");
@@ -667,8 +668,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
/* Set the border color to purple. */
I915_WRITE(bclrpat_reg, 0x500050);
- if (!IS_GEN2(dev_priv)) {
- uint32_t pipeconf = I915_READ(pipeconf_reg);
+ if (!IS_GEN(dev_priv, 2)) {
+ u32 pipeconf = I915_READ(pipeconf_reg);
I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
POSTING_READ(pipeconf_reg);
/* Wait for next Vblank to substitute
@@ -689,8 +690,8 @@ intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- uint32_t vsync = I915_READ(vsync_reg);
- uint32_t vsync_start = (vsync & 0xffff) + 1;
+ u32 vsync = I915_READ(vsync_reg);
+ u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
I915_WRITE(vblank_reg,
@@ -778,6 +779,7 @@ intel_crt_detect(struct drm_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
+ intel_wakeref_t wakeref;
int status, ret;
struct intel_load_detect_pipe tmp;
@@ -786,7 +788,8 @@ intel_crt_detect(struct drm_connector *connector,
force);
if (i915_modparams.load_detect_test) {
- intel_display_power_get(dev_priv, intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
goto load_detect;
}
@@ -794,7 +797,8 @@ intel_crt_detect(struct drm_connector *connector,
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- intel_display_power_get(dev_priv, intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
if (I915_HAS_HOTPLUG(dev_priv)) {
/* We cannot rely on the HPD pin always being correctly wired
@@ -849,7 +853,7 @@ load_detect:
}
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return status;
}
@@ -859,10 +863,12 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
- int ret;
+ intel_wakeref_t wakeref;
struct i2c_adapter *i2c;
+ int ret;
- intel_display_power_get(dev_priv, intel_encoder->power_domain);
+ wakeref = intel_display_power_get(dev_priv,
+ intel_encoder->power_domain);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
@@ -874,7 +880,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -982,7 +988,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
connector->interlace_allowed = 0;
else
connector->interlace_allowed = 1;
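The display power helpers used above now return an intel_wakeref_t cookie from the get and require it back at the put. A hedged sketch of the pattern, assuming the usual i915 context (example_read_hw_state() is hypothetical):
static bool example_read_hw_state(struct drm_i915_private *dev_priv,
				  enum intel_display_power_domain domain)
{
	intel_wakeref_t wakeref;
	bool ret;
	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
	if (!wakeref)
		return false;	/* power well is off, nothing to read */
	ret = true;		/* ... read the relevant registers here ... */
	intel_display_power_put(dev_priv, domain, wakeref);
	return ret;
}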
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index a516697bf57d..e8ac04c33e29 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -70,50 +70,50 @@ MODULE_FIRMWARE(BXT_CSR_PATH);
struct intel_css_header {
/* 0x09 for DMC */
- uint32_t module_type;
+ u32 module_type;
/* Includes the DMC specific header in dwords */
- uint32_t header_len;
+ u32 header_len;
/* value is always 0x10000 */
- uint32_t header_ver;
+ u32 header_ver;
/* Not used */
- uint32_t module_id;
+ u32 module_id;
/* Not used */
- uint32_t module_vendor;
+ u32 module_vendor;
/* in YYYYMMDD format */
- uint32_t date;
+ u32 date;
/* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
- uint32_t size;
+ u32 size;
/* Not used */
- uint32_t key_size;
+ u32 key_size;
/* Not used */
- uint32_t modulus_size;
+ u32 modulus_size;
/* Not used */
- uint32_t exponent_size;
+ u32 exponent_size;
/* Not used */
- uint32_t reserved1[12];
+ u32 reserved1[12];
/* Major Minor */
- uint32_t version;
+ u32 version;
/* Not used */
- uint32_t reserved2[8];
+ u32 reserved2[8];
/* Not used */
- uint32_t kernel_header_info;
+ u32 kernel_header_info;
} __packed;
struct intel_fw_info {
- uint16_t reserved1;
+ u16 reserved1;
/* Stepping (A, B, C, ..., *). * is a wildcard */
char stepping;
@@ -121,8 +121,8 @@ struct intel_fw_info {
/* Sub-stepping (0, 1, ..., *). * is a wildcard */
char substepping;
- uint32_t offset;
- uint32_t reserved2;
+ u32 offset;
+ u32 reserved2;
} __packed;
struct intel_package_header {
@@ -135,14 +135,14 @@ struct intel_package_header {
unsigned char reserved[10];
/* Number of valid entries in the FWInfo array below */
- uint32_t num_entries;
+ u32 num_entries;
struct intel_fw_info fw_info[20];
} __packed;
struct intel_dmc_header {
/* value is always 0x40403E3E */
- uint32_t signature;
+ u32 signature;
/* DMC binary header length */
unsigned char header_len;
@@ -151,30 +151,30 @@ struct intel_dmc_header {
unsigned char header_ver;
/* Reserved */
- uint16_t dmcc_ver;
+ u16 dmcc_ver;
/* Major, Minor */
- uint32_t project;
+ u32 project;
/* Firmware program size (excluding header) in dwords */
- uint32_t fw_size;
+ u32 fw_size;
/* Major Minor version */
- uint32_t fw_version;
+ u32 fw_version;
/* Number of valid MMIO cycles present. */
- uint32_t mmio_count;
+ u32 mmio_count;
/* MMIO address */
- uint32_t mmioaddr[8];
+ u32 mmioaddr[8];
/* MMIO data */
- uint32_t mmiodata[8];
+ u32 mmiodata[8];
/* FW filename */
unsigned char dfile[32];
- uint32_t reserved1[2];
+ u32 reserved1[2];
} __packed;
struct stepping_info {
@@ -230,7 +230,7 @@ intel_get_stepping_info(struct drm_i915_private *dev_priv)
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
- uint32_t val, mask;
+ u32 val, mask;
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
@@ -257,7 +257,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
void intel_csr_load_program(struct drm_i915_private *dev_priv)
{
u32 *payload = dev_priv->csr.dmc_payload;
- uint32_t i, fw_size;
+ u32 i, fw_size;
if (!HAS_CSR(dev_priv)) {
DRM_ERROR("No CSR support available for this platform\n");
@@ -289,17 +289,17 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
gen9_set_dc_state_debugmask(dev_priv);
}
-static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
- const struct firmware *fw)
+static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
+ const struct firmware *fw)
{
struct intel_css_header *css_header;
struct intel_package_header *package_header;
struct intel_dmc_header *dmc_header;
struct intel_csr *csr = &dev_priv->csr;
const struct stepping_info *si = intel_get_stepping_info(dev_priv);
- uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
- uint32_t i;
- uint32_t *dmc_payload;
+ u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+ u32 i;
+ u32 *dmc_payload;
if (!fw)
return NULL;
@@ -409,6 +409,21 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
return memcpy(dmc_payload, &fw->data[readcount], nbytes);
}
+static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
+{
+ WARN_ON(dev_priv->csr.wakeref);
+ dev_priv->csr.wakeref =
+ intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
+static void intel_csr_runtime_pm_put(struct drm_i915_private *dev_priv)
+{
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&dev_priv->csr.wakeref);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+}
+
static void csr_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
@@ -424,8 +439,7 @@ static void csr_load_work_fn(struct work_struct *work)
if (dev_priv->csr.dmc_payload) {
intel_csr_load_program(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_put(dev_priv);
DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
dev_priv->csr.fw_path,
@@ -467,7 +481,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
* suspend as runtime suspend *requires* a working CSR for whatever
* reason.
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_get(dev_priv);
if (INTEL_GEN(dev_priv) >= 12) {
/* Allow to load fw via parameter using the last known size */
@@ -538,7 +552,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
/* Drop the reference held in case DMC isn't loaded. */
if (!dev_priv->csr.dmc_payload)
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_put(dev_priv);
}
/**
@@ -558,7 +572,7 @@ void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
* loaded.
*/
if (!dev_priv->csr.dmc_payload)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ intel_csr_runtime_pm_get(dev_priv);
}
/**
@@ -574,6 +588,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_csr_ucode_suspend(dev_priv);
+ WARN_ON(dev_priv->csr.wakeref);
kfree(dev_priv->csr.dmc_payload);
}
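intel_csr_runtime_pm_put() above leans on fetch_and_zero() to read and clear the stashed wakeref in one step, so a second put sees a zero cookie instead of releasing the same reference twice. A rough standalone model of that idiom (an approximation only; the real macro lives in the driver's utility headers):
#define example_fetch_and_zero(ptr) ({			\
	typeof(*(ptr)) __val = *(ptr);			\
	*(ptr) = (typeof(*(ptr)))0;			\
	__val;						\
})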
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index fd06d1fd39d3..b0bb8dfc2ed5 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = {
{ 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */
};
-struct icl_combo_phy_ddi_buf_trans {
- u32 dw2_swing_select;
- u32 dw2_swing_scalar;
- u32 dw4_scaling;
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = {
- /* Voltage mV db */
- { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
- { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
- { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
- { 0x2, 0x98, 0x900F }, /* 400 9.5 */
- { 0xB, 0x70, 0x0018 }, /* 600 0.0 */
- { 0xB, 0x70, 0x3015 }, /* 600 3.5 */
- { 0xB, 0x70, 0x6012 }, /* 600 6.0 */
- { 0x5, 0x00, 0x0018 }, /* 800 0.0 */
- { 0x5, 0x00, 0x3015 }, /* 800 3.5 */
- { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
-};
-
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = {
- /* Voltage mV db */
- { 0x0, 0x00, 0x00 }, /* 200 0.0 */
- { 0x0, 0x00, 0x00 }, /* 200 1.5 */
- { 0x0, 0x00, 0x00 }, /* 200 4.0 */
- { 0x0, 0x00, 0x00 }, /* 200 6.0 */
- { 0x0, 0x00, 0x00 }, /* 250 0.0 */
- { 0x0, 0x00, 0x00 }, /* 250 1.5 */
- { 0x0, 0x00, 0x00 }, /* 250 4.0 */
- { 0x0, 0x00, 0x00 }, /* 300 0.0 */
- { 0x0, 0x00, 0x00 }, /* 300 1.5 */
- { 0x0, 0x00, 0x00 }, /* 350 0.0 */
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = {
- /* Voltage mV db */
- { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
- { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
- { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
- { 0x2, 0x98, 0x900F }, /* 400 9.5 */
- { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
- { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
- { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
- { 0x5, 0x76, 0x0018 }, /* 800 0.0 */
- { 0x5, 0x76, 0x3015 }, /* 800 3.5 */
- { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
+/* icl_combo_phy_ddi_translations */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = {
- /* Voltage mV db */
- { 0x0, 0x00, 0x00 }, /* 200 0.0 */
- { 0x0, 0x00, 0x00 }, /* 200 1.5 */
- { 0x0, 0x00, 0x00 }, /* 200 4.0 */
- { 0x0, 0x00, 0x00 }, /* 200 6.0 */
- { 0x0, 0x00, 0x00 }, /* 250 0.0 */
- { 0x0, 0x00, 0x00 }, /* 250 1.5 */
- { 0x0, 0x00, 0x00 }, /* 250 4.0 */
- { 0x0, 0x00, 0x00 }, /* 300 0.0 */
- { 0x0, 0x00, 0x00 }, /* 300 1.5 */
- { 0x0, 0x00, 0x00 }, /* 350 0.0 */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */
+ { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */
+ { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */
+ { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */
+ { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */
+ { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */
+ { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */
+ { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */
+ { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */
+ { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
};
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = {
- /* Voltage mV db */
- { 0x2, 0x98, 0x0018 }, /* 400 0.0 */
- { 0x2, 0x98, 0x3015 }, /* 400 3.5 */
- { 0x2, 0x98, 0x6012 }, /* 400 6.0 */
- { 0x2, 0x98, 0x900F }, /* 400 9.5 */
- { 0x4, 0x98, 0x0018 }, /* 600 0.0 */
- { 0x4, 0x98, 0x3015 }, /* 600 3.5 */
- { 0x4, 0x98, 0x6012 }, /* 600 6.0 */
- { 0x5, 0x71, 0x0018 }, /* 800 0.0 */
- { 0x5, 0x71, 0x3015 }, /* 800 3.5 */
- { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
};
-/* FIXME - After table is updated in Bspec */
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = {
- /* Voltage mV db */
- { 0x0, 0x00, 0x00 }, /* 200 0.0 */
- { 0x0, 0x00, 0x00 }, /* 200 1.5 */
- { 0x0, 0x00, 0x00 }, /* 200 4.0 */
- { 0x0, 0x00, 0x00 }, /* 200 6.0 */
- { 0x0, 0x00, 0x00 }, /* 250 0.0 */
- { 0x0, 0x00, 0x00 }, /* 250 1.5 */
- { 0x0, 0x00, 0x00 }, /* 250 4.0 */
- { 0x0, 0x00, 0x00 }, /* 300 0.0 */
- { 0x0, 0x00, 0x00 }, /* 300 1.5 */
- { 0x0, 0x00, 0x00 }, /* 350 0.0 */
+static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */
+ { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */
+ { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */
+ { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
struct icl_mg_phy_ddi_buf_trans {
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
}
}
-static const struct icl_combo_phy_ddi_buf_trans *
+static const struct cnl_ddi_buf_trans *
icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
- int type, int *n_entries)
+ int type, int rate, int *n_entries)
{
- u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK;
-
- if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
- switch (voltage) {
- case VOLTAGE_INFO_0_85V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V);
- return icl_combo_phy_ddi_translations_edp_0_85V;
- case VOLTAGE_INFO_0_95V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V);
- return icl_combo_phy_ddi_translations_edp_0_95V;
- case VOLTAGE_INFO_1_05V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V);
- return icl_combo_phy_ddi_translations_edp_1_05V;
- default:
- MISSING_CASE(voltage);
- return NULL;
- }
- } else {
- switch (voltage) {
- case VOLTAGE_INFO_0_85V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V);
- return icl_combo_phy_ddi_translations_dp_hdmi_0_85V;
- case VOLTAGE_INFO_0_95V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V);
- return icl_combo_phy_ddi_translations_dp_hdmi_0_95V;
- case VOLTAGE_INFO_1_05V:
- *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V);
- return icl_combo_phy_ddi_translations_dp_hdmi_1_05V;
- default:
- MISSING_CASE(voltage);
- return NULL;
- }
+ if (type == INTEL_OUTPUT_HDMI) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
+ return icl_combo_phy_ddi_translations_hdmi;
+ } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3);
+ return icl_combo_phy_ddi_translations_edp_hbr3;
+ } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2);
+ return icl_combo_phy_ddi_translations_edp_hbr2;
}
+
+ *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2);
+ return icl_combo_phy_ddi_translations_dp_hbr2;
}
static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
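The ICL combo-PHY table is now chosen by output type and, for eDP, by link rate rather than by VccIO voltage. A hedged usage sketch (example_pick_dp_entry() is hypothetical, and INTEL_OUTPUT_DP is assumed from the driver's output-type enum):
static const struct cnl_ddi_buf_trans *
example_pick_dp_entry(struct drm_i915_private *dev_priv, enum port port,
		      int rate, int level)
{
	int n_entries;
	const struct cnl_ddi_buf_trans *trans =
		icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_DP,
					rate, &n_entries);
	if (level >= n_entries)
		level = n_entries - 1;	/* clamp, as the vswing programming does */
	return &trans[level];
}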
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
if (IS_ICELAKE(dev_priv)) {
if (intel_port_is_combophy(dev_priv, port))
- icl_get_combo_buf_trans(dev_priv, port,
- INTEL_OUTPUT_HDMI, &n_entries);
+ icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
+ 0, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
default_entry = n_entries - 1;
@@ -1039,7 +974,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
-static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
+static u32 hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
{
switch (pll->info->id) {
case DPLL_ID_WRPLL1:
@@ -1060,8 +995,8 @@ static uint32_t hsw_pll_to_ddi_pll_sel(const struct intel_shared_dpll *pll)
}
}
-static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static u32 icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
int clock = crtc_state->port_clock;
@@ -1308,8 +1243,8 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
{
i915_reg_t cfgcr1_reg, cfgcr2_reg;
- uint32_t cfgcr1_val, cfgcr2_val;
- uint32_t p0, p1, p2, dco_freq;
+ u32 cfgcr1_val, cfgcr2_val;
+ u32 p0, p1, p2, dco_freq;
cfgcr1_reg = DPLL_CFGCR1(pll_id);
cfgcr2_reg = DPLL_CFGCR2(pll_id);
@@ -1361,14 +1296,17 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
1000) / 0x8000;
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
return dco_freq / (p0 * p1 * p2 * 5);
}
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
enum intel_dpll_id pll_id)
{
- uint32_t cfgcr0, cfgcr1;
- uint32_t p0, p1, p2, dco_freq, ref_clock;
+ u32 cfgcr0, cfgcr1;
+ u32 p0, p1, p2, dco_freq, ref_clock;
if (INTEL_GEN(dev_priv) >= 11) {
cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
@@ -1533,7 +1471,7 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
int link_clock = 0;
- uint32_t pll_id;
+ u32 pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
@@ -1558,7 +1496,7 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t cfgcr0;
+ u32 cfgcr0;
enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1612,7 +1550,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int link_clock = 0;
- uint32_t dpll_ctl1;
+ u32 dpll_ctl1;
enum intel_dpll_id pll_id;
pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
@@ -1801,7 +1739,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- uint32_t temp;
+ u32 temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@@ -1819,7 +1757,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = encoder->port;
- uint32_t temp;
+ u32 temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
@@ -1880,7 +1818,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_MODE_SELECT_DVI;
if (crtc_state->hdmi_scrambling)
- temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
+ temp |= TRANS_DDI_HDMI_SCRAMBLING;
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
@@ -1903,7 +1841,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
- uint32_t val = I915_READ(reg);
+ u32 val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
@@ -1922,12 +1860,14 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
{
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ intel_wakeref_t wakeref;
enum pipe pipe = 0;
int ret = 0;
- uint32_t tmp;
+ u32 tmp;
- if (WARN_ON(!intel_display_power_get_if_enabled(dev_priv,
- intel_encoder->power_domain)))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ intel_encoder->power_domain);
+ if (WARN_ON(!wakeref))
return -ENXIO;
if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
@@ -1942,7 +1882,7 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
}
@@ -1953,13 +1893,15 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
- enum pipe pipe = 0;
enum transcoder cpu_transcoder;
- uint32_t tmp;
+ intel_wakeref_t wakeref;
+ enum pipe pipe = 0;
+ u32 tmp;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
if (!encoder->get_hw_state(encoder, &pipe)) {
@@ -2001,7 +1943,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -2012,6 +1954,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = encoder->port;
+ intel_wakeref_t wakeref;
enum pipe p;
u32 tmp;
u8 mst_pipe_mask;
@@ -2019,8 +1962,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
*pipe_mask = 0;
*is_dp_mst = false;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return;
tmp = I915_READ(DDI_BUF_CTL(port));
@@ -2091,7 +2035,7 @@ out:
"(PHY_CTL %08x)\n", port_name(port), tmp);
}
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2188,7 +2132,7 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
}
static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
- enum port port, uint8_t iboost)
+ enum port port, u8 iboost)
{
u32 tmp;
@@ -2207,7 +2151,7 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- uint8_t iboost;
+ u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
@@ -2275,13 +2219,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
int n_entries;
if (IS_ICELAKE(dev_priv)) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
- &n_entries);
+ intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2462,14 +2407,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
- u32 level, enum port port, int type)
+ u32 level, enum port port, int type,
+ int rate)
{
- const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL;
+ const struct cnl_ddi_buf_trans *ddi_translations = NULL;
u32 n_entries, val;
int ln;
ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
- &n_entries);
+ rate, &n_entries);
if (!ddi_translations)
return;
@@ -2478,34 +2424,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
level = n_entries - 1;
}
- /* Set PORT_TX_DW5 Rterm Sel to 110b. */
+ /* Set PORT_TX_DW5 */
val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
- val &= ~RTERM_SELECT_MASK;
+ val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
+ TAP2_DISABLE | TAP3_DISABLE);
+ val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
-
- /* Program PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
- /* Set DisableTap2 and DisableTap3 if MIPI DSI
- * Clear DisableTap2 and DisableTap3 for all other Ports
- */
- if (type == INTEL_OUTPUT_DSI) {
- val |= TAP2_DISABLE;
- val |= TAP3_DISABLE;
- } else {
- val &= ~TAP2_DISABLE;
- val &= ~TAP3_DISABLE;
- }
+ val |= TAP3_DISABLE;
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW2 */
val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
- val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select);
- val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select);
+ val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
+ val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
- val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar);
+ val |= RCOMP_SCALAR(0x98);
I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
/* Program PORT_TX_DW4 */
@@ -2514,9 +2449,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
- val |= ddi_translations[level].dw4_scaling;
+ val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
+ val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
+ val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
}
+
+ /* Program PORT_TX_DW7 */
+ val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
+ val &= ~N_SCALAR_MASK;
+ val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+ I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2581,7 +2524,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(dev_priv, level, port, type);
+ icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
/* 6. Set training enable to trigger update */
val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
@@ -2722,7 +2665,7 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
}
-static uint32_t translate_signal_level(int signal_levels)
+static u32 translate_signal_level(int signal_levels)
{
int i;
@@ -2737,9 +2680,9 @@ static uint32_t translate_signal_level(int signal_levels)
return 0;
}
-static uint32_t intel_ddi_dp_level(struct intel_dp *intel_dp)
+static u32 intel_ddi_dp_level(struct intel_dp *intel_dp)
{
- uint8_t train_set = intel_dp->train_set[0];
+ u8 train_set = intel_dp->train_set[0];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -2764,7 +2707,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
return 0;
}
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
+u32 ddi_signal_levels(struct intel_dp *intel_dp)
{
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
@@ -2778,8 +2721,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
}
static inline
-uint32_t icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
- enum port port)
+u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
+ enum port port)
{
if (intel_port_is_combophy(dev_priv, port)) {
return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
@@ -2914,7 +2857,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- uint32_t val;
+ u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
if (WARN_ON(!pll))
@@ -3349,7 +3292,8 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
intel_edp_panel_vdd_on(intel_dp);
intel_edp_panel_off(intel_dp);
- intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+ intel_display_power_put_unchecked(dev_priv,
+ dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
}
@@ -3369,7 +3313,8 @@ static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
intel_disable_ddi_buf(encoder, old_crtc_state);
- intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+ intel_display_power_put_unchecked(dev_priv,
+ dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
@@ -3411,7 +3356,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- uint32_t val;
+ u32 val;
/*
* Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
@@ -3603,6 +3548,24 @@ static void intel_disable_ddi(struct intel_encoder *encoder,
intel_disable_ddi_dp(encoder, old_crtc_state, old_conn_state);
}
+static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+
+ intel_psr_enable(intel_dp, crtc_state);
+ intel_edp_drrs_enable(intel_dp, crtc_state);
+}
+
+static void intel_ddi_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
+}
+
static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
enum port port)
@@ -3671,8 +3634,8 @@ intel_ddi_post_pll_disable(struct intel_encoder *encoder,
if (intel_crtc_has_dp_encoder(crtc_state) ||
intel_port_is_tc(dev_priv, encoder->port))
- intel_display_power_put(dev_priv,
- intel_ddi_main_link_aux_domain(dig_port));
+ intel_display_power_put_unchecked(dev_priv,
+ intel_ddi_main_link_aux_domain(dig_port));
}
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -3681,7 +3644,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
enum port port = intel_dig_port->base.port;
- uint32_t val;
+ u32 val;
bool wait = false;
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
@@ -3793,8 +3756,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
pipe_config->has_infoframe = true;
- if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
- TRANS_DDI_HDMI_SCRAMBLING_MASK)
+ if (temp & TRANS_DDI_HDMI_SCRAMBLING)
pipe_config->hdmi_scrambling = true;
if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
pipe_config->hdmi_high_tmds_clock_ratio = true;
@@ -3901,9 +3863,50 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
}
+static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ intel_dp_encoder_suspend(encoder);
+
+ /*
+ * TODO: also disconnect from USB DP alternate mode once we have a
+ * way to handle the modeset restore in that mode during resume
+ * even if the sink has disappeared while being suspended.
+ */
+ if (dig_port->tc_legacy_port)
+ icl_tc_phy_disconnect(i915, dig_port);
+}
+
+static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
+ struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
+
+ if (intel_port_is_tc(i915, dig_port->base.port))
+ intel_digital_port_connected(&dig_port->base);
+
+ intel_dp_encoder_reset(drm_encoder);
+}
+
+static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->dev);
+
+ intel_dp_encoder_flush_work(encoder);
+
+ if (intel_port_is_tc(i915, dig_port->base.port))
+ icl_tc_phy_disconnect(i915, dig_port);
+
+ drm_encoder_cleanup(encoder);
+ kfree(dig_port);
+}
+
static const struct drm_encoder_funcs intel_ddi_funcs = {
- .reset = intel_dp_encoder_reset,
- .destroy = intel_dp_encoder_destroy,
+ .reset = intel_ddi_encoder_reset,
+ .destroy = intel_ddi_encoder_destroy,
};
static struct intel_connector *
@@ -4147,16 +4150,16 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
+ struct ddi_vbt_port_info *port_info =
+ &dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum pipe pipe;
-
- init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
- dev_priv->vbt.ddi_port_info[port].supports_hdmi);
- init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp;
+ init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
+ init_dp = port_info->supports_dp;
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
@@ -4195,9 +4198,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
+ intel_encoder->update_pipe = intel_ddi_update_pipe;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
- intel_encoder->suspend = intel_dp_encoder_suspend;
+ intel_encoder->suspend = intel_ddi_encoder_suspend;
intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_encoder->type = INTEL_OUTPUT_DDI;
intel_encoder->power_domain = intel_port_to_power_domain(port);
@@ -4216,6 +4220,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
+ intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
+ !port_info->supports_typec_usb &&
+ !port_info->supports_tbt;
+
switch (port) {
case PORT_A:
intel_dig_port->ddi_io_power_domain =
@@ -4274,6 +4282,10 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
intel_infoframe_init(intel_dig_port);
+
+ if (intel_port_is_tc(dev_priv, port))
+ intel_digital_port_connected(intel_encoder);
+
return;
err:
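intel_ddi_update_pipe() gives DDI encoders an update hook so PSR and DRRS can be re-enabled on a fastset without a full modeset. A sketch of how core code would drive such a hook, assuming the encoder vfunc added by this series (example_fastset_update() is hypothetical):
static void example_fastset_update(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state)
{
	if (encoder->update_pipe)
		encoder->update_pipe(encoder, crtc_state, conn_state);
}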
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 1e56319334f3..855a5074ad77 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -104,7 +104,7 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
struct drm_printer *p)
{
sseu_dump(&info->sseu, p);
@@ -113,21 +113,6 @@ void intel_device_info_dump_runtime(const struct intel_device_info *info,
info->cs_timestamp_frequency_khz);
}
-void intel_device_info_dump(const struct intel_device_info *info,
- struct drm_printer *p)
-{
- struct drm_i915_private *dev_priv =
- container_of(info, struct drm_i915_private, info);
-
- drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
- INTEL_DEVID(dev_priv),
- INTEL_REVID(dev_priv),
- intel_platform_name(info->platform),
- info->gen);
-
- intel_device_info_dump_flags(info, p);
-}
-
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
@@ -164,7 +149,7 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u8 s_en;
u32 ss_en, ss_en_mask;
u8 eu_en;
@@ -203,7 +188,7 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
const u32 fuse2 = I915_READ(GEN8_FUSE2);
int s, ss;
const int eu_mask = 0xff;
@@ -280,7 +265,7 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse;
fuse = I915_READ(CHV_FUSE_GT);
@@ -334,7 +319,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
- struct sseu_dev_info *sseu = &info->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
u32 fuse2, eu_disable, subslice_mask;
const u8 eu_mask = 0xff;
@@ -437,7 +422,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
int s, ss;
u32 fuse2, subslice_mask, eu_disable[3]; /* s_max */
@@ -519,8 +504,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
{
- struct intel_device_info *info = mkwrite_device_info(dev_priv);
- struct sseu_dev_info *sseu = &info->sseu;
+ struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 fuse1;
int s, ss;
@@ -528,9 +512,9 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv)
* There isn't a register to tell us how many slices/subslices. We
* work off the PCI-ids here.
*/
- switch (info->gt) {
+ switch (INTEL_INFO(dev_priv)->gt) {
default:
- MISSING_CASE(info->gt);
+ MISSING_CASE(INTEL_INFO(dev_priv)->gt);
/* fall through */
case 1:
sseu->slice_mask = BIT(0);
@@ -725,7 +709,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
/**
* intel_device_info_runtime_init - initialize runtime info
- * @info: intel device info struct
+ * @dev_priv: the i915 device
*
* Determine various intel_device_info fields at runtime.
*
@@ -739,29 +723,29 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* - after the PCH has been detected,
* - before the first usage of the fields it can tweak.
*/
-void intel_device_info_runtime_init(struct intel_device_info *info)
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv =
- container_of(info, struct drm_i915_private, info);
+ struct intel_device_info *info = mkwrite_device_info(dev_priv);
+ struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
enum pipe pipe;
if (INTEL_GEN(dev_priv) >= 10) {
for_each_pipe(dev_priv, pipe)
- info->num_scalers[pipe] = 2;
- } else if (IS_GEN9(dev_priv)) {
- info->num_scalers[PIPE_A] = 2;
- info->num_scalers[PIPE_B] = 2;
- info->num_scalers[PIPE_C] = 1;
+ runtime->num_scalers[pipe] = 2;
+ } else if (IS_GEN(dev_priv, 9)) {
+ runtime->num_scalers[PIPE_A] = 2;
+ runtime->num_scalers[PIPE_B] = 2;
+ runtime->num_scalers[PIPE_C] = 1;
}
BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
- if (IS_GEN11(dev_priv))
+ if (IS_GEN(dev_priv, 11))
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 6;
- else if (IS_GEN10(dev_priv) || IS_GEMINILAKE(dev_priv))
+ runtime->num_sprites[pipe] = 6;
+ else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 3;
+ runtime->num_sprites[pipe] = 3;
else if (IS_BROXTON(dev_priv)) {
/*
* Skylake and Broxton currently don't expose the topmost plane as its
@@ -772,22 +756,22 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
* down the line.
*/
- info->num_sprites[PIPE_A] = 2;
- info->num_sprites[PIPE_B] = 2;
- info->num_sprites[PIPE_C] = 1;
+ runtime->num_sprites[PIPE_A] = 2;
+ runtime->num_sprites[PIPE_B] = 2;
+ runtime->num_sprites[PIPE_C] = 1;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 2;
+ runtime->num_sprites[pipe] = 2;
} else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
for_each_pipe(dev_priv, pipe)
- info->num_sprites[pipe] = 1;
+ runtime->num_sprites[pipe] = 1;
}
if (i915_modparams.disable_display) {
DRM_INFO("Display disabled (module parameter)\n");
info->num_pipes = 0;
} else if (HAS_DISPLAY(dev_priv) &&
- (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
+ (IS_GEN_RANGE(dev_priv, 7, 8)) &&
HAS_PCH_SPLIT(dev_priv)) {
u32 fuse_strap = I915_READ(FUSE_STRAP);
u32 sfuse_strap = I915_READ(SFUSE_STRAP);
@@ -811,7 +795,7 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
DRM_INFO("PipeC fused off\n");
info->num_pipes -= 1;
}
- } else if (HAS_DISPLAY(dev_priv) && IS_GEN9(dev_priv)) {
+ } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
u32 dfsm = I915_READ(SKL_DFSM);
u8 disabled_mask = 0;
bool invalid;
@@ -851,20 +835,20 @@ void intel_device_info_runtime_init(struct intel_device_info *info)
cherryview_sseu_info_init(dev_priv);
else if (IS_BROADWELL(dev_priv))
broadwell_sseu_info_init(dev_priv);
- else if (IS_GEN9(dev_priv))
+ else if (IS_GEN(dev_priv, 9))
gen9_sseu_info_init(dev_priv);
- else if (IS_GEN10(dev_priv))
+ else if (IS_GEN(dev_priv, 10))
gen10_sseu_info_init(dev_priv);
else if (INTEL_GEN(dev_priv) >= 11)
gen11_sseu_info_init(dev_priv);
- if (IS_GEN6(dev_priv) && intel_vtd_active()) {
+ if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
info->ppgtt = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
- info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+ runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
}
void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -884,35 +868,44 @@ void intel_driver_caps_print(const struct intel_driver_caps *caps,
void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *info = mkwrite_device_info(dev_priv);
- u32 media_fuse;
+ unsigned int logical_vdbox = 0;
unsigned int i;
+ u32 media_fuse;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- info->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- info->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox enable: %04x\n", info->vdbox_enable);
+ DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & info->vdbox_enable)) {
+ if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ continue;
}
+
+ /*
+ * In Gen11, only even numbered logical VDBOXes are
+ * hooked up to an SFC (Scaler & Format Converter) unit.
+ */
+ if (logical_vdbox++ % 2 == 0)
+ RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
- DRM_DEBUG_DRIVER("vebox enable: %04x\n", info->vebox_enable);
+ DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & info->vebox_enable)) {
+ if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
info->ring_mask &= ~ENGINE_MASK(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
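The new vdbox_sfc_access mask records which video-decode engines sit in front of an SFC unit (the even logical instances on Gen11). A minimal sketch of how a consumer could query it (example_vdbox_has_sfc() is hypothetical):
static bool example_vdbox_has_sfc(struct drm_i915_private *dev_priv,
				  unsigned int vdbox_instance)
{
	return RUNTIME_INFO(dev_priv)->vdbox_sfc_access & BIT(vdbox_instance);
}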
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 1caf24e2cf0b..957c6527f76b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -89,6 +89,7 @@ enum intel_ppgtt {
func(is_alpha_support); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_fpga_dbg); \
func(has_guc); \
@@ -152,12 +153,10 @@ struct sseu_dev_info {
typedef u8 intel_ring_mask_t;
struct intel_device_info {
- u16 device_id;
u16 gen_mask;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
- u8 num_rings;
intel_ring_mask_t ring_mask; /* Rings supported by the HW */
enum intel_platform platform;
@@ -169,8 +168,6 @@ struct intel_device_info {
u32 display_mmio_offset;
u8 num_pipes;
- u8 num_sprites[I915_MAX_PIPES];
- u8 num_scalers[I915_MAX_PIPES];
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
@@ -189,6 +186,20 @@ struct intel_device_info {
int trans_offsets[I915_MAX_TRANSCODERS];
int cursor_offsets[I915_MAX_PIPES];
+ struct color_luts {
+ u16 degamma_lut_size;
+ u16 gamma_lut_size;
+ } color;
+};
+
+struct intel_runtime_info {
+ u16 device_id;
+
+ u8 num_sprites[I915_MAX_PIPES];
+ u8 num_scalers[I915_MAX_PIPES];
+
+ u8 num_rings;
+
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
@@ -198,10 +209,8 @@ struct intel_device_info {
u8 vdbox_enable;
u8 vebox_enable;
- struct color_luts {
- u16 degamma_lut_size;
- u16 gamma_lut_size;
- } color;
+ /* Media engine access to SFC per instance */
+ u8 vdbox_sfc_access;
};
struct intel_driver_caps {
@@ -258,12 +267,10 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
const char *intel_platform_name(enum intel_platform platform);
-void intel_device_info_runtime_init(struct intel_device_info *info);
-void intel_device_info_dump(const struct intel_device_info *info,
- struct drm_printer *p);
+void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
-void intel_device_info_dump_runtime(const struct intel_device_info *info,
+void intel_device_info_dump_runtime(const struct intel_runtime_info *info,
struct drm_printer *p);
void intel_device_info_dump_topology(const struct sseu_dev_info *sseu,
struct drm_printer *p);
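With this split, intel_device_info keeps the mostly-constant data baked in at PCI probe time while intel_runtime_info collects values derived from fuses and other probing. A hedged sketch of the resulting access pattern, assuming the INTEL_INFO()/RUNTIME_INFO() accessors used in intel_device_info.c above (example_dump_split() is hypothetical):
static void example_dump_split(struct drm_i915_private *dev_priv,
			       struct drm_printer *p)
{
	/* static: fixed per platform, set from the PCI ID tables */
	drm_printf(p, "gen: %u\n", INTEL_INFO(dev_priv)->gen);
	/* runtime: probed from fuse registers and similar */
	drm_printf(p, "rings: %u\n", RUNTIME_INFO(dev_priv)->num_rings);
}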
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52c63135bc65..2fa9f4aec08e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -31,14 +31,7 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
-#include <drm/drmP.h>
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "intel_dsi.h"
-#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
@@ -49,8 +42,17 @@
#include <linux/intel-iommu.h>
#include <linux/reservation.h>
+#include "intel_drv.h"
+#include "intel_dsi.h"
+#include "intel_frontbuffer.h"
+
+#include "i915_drv.h"
+#include "i915_gem_clflush.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
+
/* Primary plane formats for gen <= 3 */
-static const uint32_t i8xx_primary_formats[] = {
+static const u32 i8xx_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB1555,
@@ -58,7 +60,7 @@ static const uint32_t i8xx_primary_formats[] = {
};
/* Primary plane formats for gen >= 4 */
-static const uint32_t i965_primary_formats[] = {
+static const u32 i965_primary_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -67,18 +69,18 @@ static const uint32_t i965_primary_formats[] = {
DRM_FORMAT_XBGR2101010,
};
-static const uint64_t i9xx_format_modifiers[] = {
+static const u64 i9xx_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
/* Cursor formats */
-static const uint32_t intel_cursor_formats[] = {
+static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static const uint64_t cursor_format_modifiers[] = {
+static const u64 cursor_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
@@ -494,7 +496,7 @@ static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
+static u32 i9xx_dpll_compute_m(struct dpll *dpll)
{
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
@@ -529,8 +531,8 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
clock->p = clock->p1 * clock->p2;
if (WARN_ON(clock->n == 0 || clock->p == 0))
return 0;
- clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
- clock->n << 22);
+ clock->vco = DIV_ROUND_CLOSEST_ULL((u64)refclk * clock->m,
+ clock->n << 22);
clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
return clock->dot / 5;
@@ -892,7 +894,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
struct drm_device *dev = crtc->base.dev;
unsigned int best_error_ppm;
struct dpll clock;
- uint64_t m2;
+ u64 m2;
int found = false;
memset(best_clock, 0, sizeof(*best_clock));
@@ -914,7 +916,7 @@ chv_find_best_dpll(const struct intel_limit *limit,
clock.p = clock.p1 * clock.p2;
- m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
+ m2 = DIV_ROUND_CLOSEST_ULL(((u64)target * clock.p *
clock.n) << 22, refclk * clock.m1);
if (m2 > INT_MAX/clock.m1)
@@ -984,7 +986,7 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
u32 line1, line2;
u32 line_mask;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
line_mask = DSL_LINEMASK_GEN2;
else
line_mask = DSL_LINEMASK_GEN3;
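Note: throughout this file the per-generation helpers (IS_GEN2(), IS_GEN5(), ...) are replaced by a single IS_GEN(dev_priv, n) that takes the generation as a parameter, and ranges by IS_GEN_RANGE(). As a rough, hypothetical sketch of how such a parameterised check can be built on a per-device generation bitmask (the driver's actual definition may differ), compiled as a stand-alone program:

/* Illustrative only; FAKE_* names and the struct are invented for this sketch. */
#include <stdio.h>

struct fake_device_info {
	unsigned int gen_mask;	/* bit (n - 1) set for gen n */
};

#define FAKE_IS_GEN(info, n)	(!!((info)->gen_mask & (1u << ((n) - 1))))

int main(void)
{
	struct fake_device_info gen2_dev = { .gen_mask = 1u << (2 - 1) };

	printf("IS_GEN 2: %d, IS_GEN 5: %d\n",
	       FAKE_IS_GEN(&gen2_dev, 2), FAKE_IS_GEN(&gen2_dev, 5));
	return 0;
}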
@@ -1110,7 +1112,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
u32 val;
/* ILK FDI PLL is always enabled */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
return;
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1198,17 +1200,19 @@ void assert_pipe(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
state = true;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (wakeref) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
} else {
cur_state = false;
}
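Note: the recurring change in the hunks that follow is that intel_display_power_get_if_enabled() and intel_display_power_get() now return an intel_wakeref_t cookie; a zero cookie means the reference was not taken, and a non-zero cookie must be handed back to intel_display_power_put() (callers that cannot track it use the _unchecked variant). A minimal stand-alone model of that cookie flow, with invented names, looks like this:

/* Illustrative model only; the driver's intel_wakeref_t tracking is richer. */
#include <stdio.h>

typedef unsigned long wakeref_t;	/* 0 == no reference taken */

static int domain_enabled = 1;
static unsigned long next_cookie = 1;

static wakeref_t fake_power_get_if_enabled(void)
{
	if (!domain_enabled)
		return 0;		/* caller must skip the put() */
	return next_cookie++;		/* non-zero cookie identifies this get() */
}

static void fake_power_put(wakeref_t wakeref)
{
	printf("released wakeref %lu\n", wakeref);
}

int main(void)
{
	wakeref_t wakeref = fake_power_get_if_enabled();

	if (!wakeref)			/* same shape as assert_pipe() above */
		return 0;

	/* ... read registers while the power domain is guaranteed to be up ... */

	fake_power_put(wakeref);
	return 0;
}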
@@ -1609,7 +1613,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
i915_reg_t reg;
- uint32_t val, pipeconf_val;
+ u32 val, pipeconf_val;
/* Make sure PCH DPLL is enabled */
assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
@@ -1697,7 +1701,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
i915_reg_t reg;
- uint32_t val;
+ u32 val;
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1850,7 +1854,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
- return IS_GEN2(dev_priv) ? 2048 : 4096;
+ return IS_GEN(dev_priv, 2) ? 2048 : 4096;
}
static unsigned int
@@ -1863,7 +1867,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
case DRM_FORMAT_MOD_LINEAR:
return cpp;
case I915_FORMAT_MOD_X_TILED:
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return 128;
else
return 512;
@@ -1872,7 +1876,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
return 128;
/* fall through */
case I915_FORMAT_MOD_Y_TILED:
- if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+ if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
return 128;
else
return 512;
@@ -2024,6 +2028,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
unsigned int pinctl;
u32 alignment;
@@ -2047,7 +2052,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
* intel_runtime_pm_put(), so it is correct to wrap only the
* pin/unpin/fence and not more.
*/
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
@@ -2102,7 +2107,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
err:
atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return vma;
}
@@ -2373,7 +2378,7 @@ static int intel_fb_offset_to_xy(int *x, int *y,
return 0;
}
-static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
+static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case I915_FORMAT_MOD_X_TILED:
@@ -3193,8 +3198,8 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
- if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
- IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
+ IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -3412,6 +3417,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ intel_wakeref_t wakeref;
bool ret;
u32 val;
@@ -3421,7 +3427,8 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
val = I915_READ(DSPCNTR(i9xx_plane));
@@ -3434,7 +3441,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
DISPPLANE_SEL_PIPE_SHIFT;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -3503,7 +3510,7 @@ u32 skl_plane_stride(const struct intel_plane_state *plane_state,
return stride / skl_plane_stride_mult(fb, color_plane, rotation);
}
-static u32 skl_plane_ctl_format(uint32_t pixel_format)
+static u32 skl_plane_ctl_format(u32 pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_C8:
@@ -3573,7 +3580,7 @@ static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state
}
}
-static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+static u32 skl_plane_ctl_tiling(u64 fb_modifier)
{
switch (fb_modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -3746,8 +3753,8 @@ __intel_display_resume(struct drm_device *dev,
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
- return intel_has_gpu_reset(dev_priv) &&
- INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv);
+ return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
+ intel_has_gpu_reset(dev_priv));
}
void intel_prepare_reset(struct drm_i915_private *dev_priv)
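Note: the hunk above replaces an open-coded gen/platform check with a gpu_reset_clobbers_display flag read from the static device info. A small stand-alone sketch of that data-driven style (struct and values invented for illustration; in the driver the flag lives in the per-platform intel_device_info tables):

#include <stdbool.h>
#include <stdio.h>

struct fake_device_info {
	bool gpu_reset_clobbers_display;
};

/* Hypothetical per-platform tables; the real values are set elsewhere. */
static const struct fake_device_info gen3_info = { .gpu_reset_clobbers_display = true };
static const struct fake_device_info gen9_info = { .gpu_reset_clobbers_display = false };

static bool reset_clobbers_display(const struct fake_device_info *info,
				   bool has_gpu_reset)
{
	return info->gpu_reset_clobbers_display && has_gpu_reset;
}

int main(void)
{
	printf("gen3: %d, gen9: %d\n",
	       reset_clobbers_display(&gen3_info, true),
	       reset_clobbers_display(&gen9_info, true));
	return 0;
}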
@@ -4120,7 +4127,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
@@ -4593,7 +4600,7 @@ static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *c
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
- uint32_t temp;
+ u32 temp;
temp = I915_READ(SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
@@ -4919,10 +4926,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (IS_GEN11(dev_priv) &&
+ (IS_GEN(dev_priv, 11) &&
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (!IS_GEN11(dev_priv) &&
+ (!IS_GEN(dev_priv, 11) &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5213,7 +5220,7 @@ intel_post_enable_primary(struct drm_crtc *crtc,
* FIXME: Need to fix the logic to work when we turn off all planes
* but leave the pipe running.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
/* Underruns don't always raise interrupts, so check manually. */
@@ -5234,7 +5241,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
hsw_disable_ips(to_intel_crtc_state(crtc->state));
@@ -5292,7 +5299,7 @@ static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
return false;
/* WA Display #0827: Gen9:all */
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
return true;
return false;
@@ -5365,7 +5372,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
* Gen2 reports pipe underruns whenever all planes are disabled.
* So disable underrun reporting before all the planes get disabled.
*/
- if (IS_GEN2(dev_priv) && old_primary_state->visible &&
+ if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
(modeset || !new_primary_state->base.visible))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
}
@@ -5578,6 +5585,26 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
}
}
+static void intel_encoders_update_pipe(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(old_state, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (encoder->update_pipe)
+ encoder->update_pipe(encoder, crtc_state, conn_state);
+ }
+}
+
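Note: intel_encoders_update_pipe() added above walks the new connector states and invokes an optional encoder->update_pipe hook; intel_update_crtc() further down only calls it for fastsets (pipe_config->update_pipe). A stand-alone sketch of that optional-hook dispatch, with made-up encoder types:

/* Illustration only; struct and hook names here are invented. */
#include <stddef.h>
#include <stdio.h>

struct fake_encoder {
	const char *name;
	void (*update_pipe)(struct fake_encoder *enc);	/* optional hook */
};

static void edp_update_pipe(struct fake_encoder *enc)
{
	printf("%s: refresh panel state without a full modeset\n", enc->name);
}

int main(void)
{
	struct fake_encoder encoders[] = {
		{ .name = "eDP",  .update_pipe = edp_update_pipe },
		{ .name = "HDMI" },				/* no hook, skipped */
	};

	for (size_t i = 0; i < sizeof(encoders) / sizeof(encoders[0]); i++)
		if (encoders[i].update_pipe)		/* NULL check, like the hunk above */
			encoders[i].update_pipe(&encoders[i]);

	return 0;
}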
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5641,7 +5668,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5696,7 +5723,7 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- uint32_t val;
+ u32 val;
val = MBUS_DBOX_A_CREDIT(2);
val |= MBUS_DBOX_BW_CREDIT(1);
@@ -5752,7 +5779,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
haswell_set_pipemisc(pipe_config);
- intel_color_set_csc(&pipe_config->base);
+ intel_color_set_csc(pipe_config);
intel_crtc->active = true;
@@ -5771,7 +5798,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
* On ILK+ LUT must be loaded before the pipe is running but with
* clocks enabled
*/
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
/*
* Display WA #1153: enable hardware to bypass the alpha math
@@ -6087,7 +6114,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain;
for_each_power_domain(domain, domains)
- intel_display_power_put(dev_priv, domain);
+ intel_display_power_put_unchecked(dev_priv, domain);
}
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
@@ -6117,7 +6144,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_set_pipeconf(pipe_config);
- intel_color_set_csc(&pipe_config->base);
+ intel_color_set_csc(pipe_config);
intel_crtc->active = true;
@@ -6137,7 +6164,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_pfit_enable(pipe_config);
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
@@ -6184,7 +6211,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_crtc->active = true;
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_encoders_pre_enable(crtc, pipe_config, old_state);
@@ -6193,7 +6220,7 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
i9xx_pfit_enable(pipe_config);
- intel_color_load_luts(&pipe_config->base);
+ intel_color_load_luts(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
@@ -6236,7 +6263,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
intel_wait_for_vblank(dev_priv, pipe);
intel_encoders_disable(crtc, old_crtc_state, old_state);
@@ -6261,7 +6288,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
if (!dev_priv->display.initial_watermarks)
@@ -6334,7 +6361,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
domains = intel_crtc->enabled_power_domains;
for_each_power_domain(domain, domains)
- intel_display_power_put(dev_priv, domain);
+ intel_display_power_put_unchecked(dev_priv, domain);
intel_crtc->enabled_power_domains = 0;
dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
@@ -6600,9 +6627,9 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
-static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
- uint32_t pixel_rate;
+ u32 pixel_rate;
pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
@@ -6612,8 +6639,8 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
*/
if (pipe_config->pch_pfit.enabled) {
- uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = pipe_config->pch_pfit.size;
+ u64 pipe_w, pipe_h, pfit_w, pfit_h;
+ u32 pfit_size = pipe_config->pch_pfit.size;
pipe_w = pipe_config->pipe_src_w;
pipe_h = pipe_config->pipe_src_h;
@@ -6628,7 +6655,7 @@ static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
if (WARN_ON(!pfit_w || !pfit_h))
return pixel_rate;
- pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+ pixel_rate = div_u64((u64)pixel_rate * pipe_w * pipe_h,
pfit_w * pfit_h);
}
@@ -6724,7 +6751,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
static void
-intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
+intel_reduce_m_n_ratio(u32 *num, u32 *den)
{
while (*num > DATA_LINK_M_N_MASK ||
*den > DATA_LINK_M_N_MASK) {
@@ -6734,7 +6761,7 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
}
static void compute_m_n(unsigned int m, unsigned int n,
- uint32_t *ret_m, uint32_t *ret_n,
+ u32 *ret_m, u32 *ret_n,
bool constant_n)
{
/*
@@ -6749,7 +6776,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
else
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
- *ret_m = div_u64((uint64_t) m * *ret_n, n);
+ *ret_m = div_u64((u64)m * *ret_n, n);
intel_reduce_m_n_ratio(ret_m, ret_n);
}
@@ -6779,12 +6806,12 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
-static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
+static u32 pnv_dpll_compute_fp(struct dpll *dpll)
{
return (1 << dpll->n) << 16 | dpll->m2;
}
-static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
+static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
{
return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}
@@ -6868,7 +6895,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
* Strictly speaking some registers are available before
* gen7, but we only support DRRS on gen7+
*/
- return IS_GEN7(dev_priv) || IS_CHERRYVIEW(dev_priv);
+ return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
}
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -7340,7 +7367,7 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
- uint32_t crtc_vtotal, crtc_vblank_end;
+ u32 crtc_vtotal, crtc_vblank_end;
int vsyncshift = 0;
/* We need to be careful not to change the adjusted mode, for otherwise
@@ -7415,7 +7442,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(HTOTAL(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
@@ -7486,7 +7513,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t pipeconf;
+ u32 pipeconf;
pipeconf = 0;
@@ -7731,7 +7758,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t tmp;
+ u32 tmp;
if (INTEL_GEN(dev_priv) <= 3 &&
(IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
@@ -7946,11 +7973,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
- uint32_t tmp;
+ intel_wakeref_t wakeref;
+ u32 tmp;
bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -8051,7 +8080,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -8225,7 +8254,7 @@ static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
@@ -8247,7 +8276,7 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
- uint32_t tmp;
+ u32 tmp;
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
@@ -8328,7 +8357,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
bool with_spread, bool with_fdi)
{
- uint32_t reg, tmp;
+ u32 reg, tmp;
if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
with_spread = true;
@@ -8367,7 +8396,7 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
- uint32_t reg, tmp;
+ u32 reg, tmp;
mutex_lock(&dev_priv->sb_lock);
@@ -8392,7 +8421,7 @@ static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
#define BEND_IDX(steps) ((50 + (steps)) / 5)
-static const uint16_t sscdivintphase[] = {
+static const u16 sscdivintphase[] = {
[BEND_IDX( 50)] = 0x3B23,
[BEND_IDX( 45)] = 0x3B23,
[BEND_IDX( 40)] = 0x3C23,
@@ -8424,7 +8453,7 @@ static const uint16_t sscdivintphase[] = {
*/
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
- uint32_t tmp;
+ u32 tmp;
int idx = BEND_IDX(steps);
if (WARN_ON(steps % 5 != 0))
@@ -8490,7 +8519,7 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- uint32_t val;
+ u32 val;
val = 0;
@@ -8837,7 +8866,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
- uint32_t ps_ctrl = 0;
+ u32 ps_ctrl = 0;
int id = -1;
int i;
@@ -8993,7 +9022,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9005,7 +9034,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiate them) so just WARN about this case for now. */
- if (IS_GEN7(dev_priv)) {
+ if (IS_GEN(dev_priv, 7)) {
WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
PF_PIPE_SEL_IVB(crtc->pipe));
}
@@ -9018,11 +9047,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- uint32_t tmp;
+ intel_wakeref_t wakeref;
+ u32 tmp;
bool ret;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -9105,7 +9136,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -9145,7 +9176,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
-static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
+static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv))
return I915_READ(D_COMP_HSW);
@@ -9153,7 +9184,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
return I915_READ(D_COMP_BDW);
}
-static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
+static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
{
if (IS_HASWELL(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
@@ -9178,7 +9209,7 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
bool switch_to_fclk, bool allow_power_down)
{
- uint32_t val;
+ u32 val;
assert_can_disable_lcpll(dev_priv);
@@ -9225,7 +9256,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
*/
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
val = I915_READ(LCPLL_CTL);
@@ -9300,7 +9331,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
*/
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9316,7 +9347,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9438,7 +9469,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
- uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+ u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
switch (ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
@@ -9495,7 +9526,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_set_bit(panel_transcoder, &panel_transcoder_mask, 32) {
+ for_each_set_bit(panel_transcoder,
+ &panel_transcoder_mask,
+ ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
enum pipe trans_pipe;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
@@ -9541,6 +9574,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
*power_domain_mask |= BIT_ULL(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@@ -9568,6 +9603,8 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
+
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
*power_domain_mask |= BIT_ULL(power_domain);
/*
@@ -9602,7 +9639,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum port port;
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
@@ -9684,7 +9721,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ WARN_ON(power_domain_mask & BIT_ULL(power_domain));
power_domain_mask |= BIT_ULL(power_domain);
+
if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@@ -9714,7 +9753,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
out:
for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put_unchecked(dev_priv, power_domain);
return active;
}
@@ -9964,17 +10003,19 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(PIPE_A);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -9995,7 +10036,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
u32 cntl = 0;
- if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
if (INTEL_GEN(dev_priv) <= 10) {
@@ -10197,6 +10238,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
u32 val;
@@ -10206,7 +10248,8 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
* display power wells.
*/
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
val = I915_READ(CURCNTR(plane->pipe));
@@ -10219,7 +10262,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
*pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
MCURSOR_PIPE_SELECT_SHIFT;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -10468,7 +10511,7 @@ static int i9xx_pll_refclk(struct drm_device *dev,
return dev_priv->vbt.lvds_ssc_freq;
else if (HAS_PCH_SPLIT(dev_priv))
return 120000;
- else if (!IS_GEN2(dev_priv))
+ else if (!IS_GEN(dev_priv, 2))
return 96000;
else
return 48000;
@@ -10501,7 +10544,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
- if (!IS_GEN2(dev_priv)) {
+ if (!IS_GEN(dev_priv, 2)) {
if (IS_PINEVIEW(dev_priv))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
@@ -10653,20 +10696,17 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
/**
* intel_wm_need_update - Check whether watermarks need updating
- * @plane: drm plane
- * @state: new plane state
+ * @cur: current plane state
+ * @new: new plane state
*
* Check current plane state versus the new one to determine whether
* watermarks need to be recalculated.
*
* Returns true or false.
*/
-static bool intel_wm_need_update(struct drm_plane *plane,
- struct drm_plane_state *state)
+static bool intel_wm_need_update(struct intel_plane_state *cur,
+ struct intel_plane_state *new)
{
- struct intel_plane_state *new = to_intel_plane_state(state);
- struct intel_plane_state *cur = to_intel_plane_state(plane->state);
-
/* Update watermarks on tiling or size changes. */
if (new->base.visible != cur->base.visible)
return true;
@@ -10775,7 +10815,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
pipe_config->disable_cxsr = true;
- } else if (intel_wm_need_update(&plane->base, plane_state)) {
+ } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
+ to_intel_plane_state(plane_state))) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
pipe_config->update_wm_pre = true;
@@ -10815,9 +10856,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* Despite the w/a only being listed for IVB we assume that
* the ILK/SNB note has similar ramifications, hence we apply
* the w/a on all three platforms.
+ *
+	 * Experimental results suggest this is also needed for the primary
+	 * plane, not only the sprite plane.
*/
- if (plane->id == PLANE_SPRITE0 &&
- (IS_GEN5(dev_priv) || IS_GEN6(dev_priv) ||
+ if (plane->id != PLANE_CURSOR &&
+ (IS_GEN_RANGE(dev_priv, 5, 6) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
needs_scaling(to_intel_plane_state(plane_state)))))
@@ -10954,8 +10998,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
@@ -10975,7 +11018,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
if (crtc_state->color_mgmt_changed) {
- ret = intel_color_check(crtc, crtc_state);
+ ret = intel_color_check(pipe_config);
if (ret)
return ret;
@@ -11004,9 +11047,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
* old state and the new state. We can program these
* immediately.
*/
- ret = dev_priv->display.compute_intermediate_wm(dev,
- intel_crtc,
- pipe_config);
+ ret = dev_priv->display.compute_intermediate_wm(pipe_config);
if (ret) {
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
@@ -11014,7 +11055,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed)
+ if (mode_changed || pipe_config->update_pipe)
ret = skl_update_scaler_crtc(pipe_config);
if (!ret)
@@ -11967,7 +12008,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
- skl_pipe_wm_get_hw_state(crtc, &hw_wm);
+ skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
@@ -12381,7 +12422,7 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
* However if queried just before the start of vblank we'll get an
* answer that's slightly in the future.
*/
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
int vtotal;
@@ -12622,9 +12663,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* phase. The code here should be run after the per-crtc and per-plane 'check'
* handlers to ensure that all derived state has been updated.
*/
-static int calc_watermark_data(struct drm_atomic_state *state)
+static int calc_watermark_data(struct intel_atomic_state *state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
/* Is there platform-specific watermark information to calculate? */
@@ -12720,7 +12761,7 @@ static int intel_atomic_check(struct drm_device *dev,
return ret;
intel_fbc_choose_crtc(dev_priv, intel_state);
- return calc_watermark_data(state);
+ return calc_watermark_data(intel_state);
}
static int intel_atomic_prepare_commit(struct drm_device *dev,
@@ -12762,9 +12803,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
} else {
intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
pipe_config);
+
+ if (pipe_config->update_pipe)
+ intel_encoders_update_pipe(crtc, pipe_config, state);
}
- if (new_plane_state)
+ if (pipe_config->update_pipe && !pipe_config->enable_fbc)
+ intel_fbc_disable(intel_crtc);
+ else if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
intel_begin_crtc_commit(crtc, old_crtc_state);
@@ -12937,6 +12983,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
u64 put_domains[I915_MAX_PIPES] = {};
+ intel_wakeref_t wakeref = 0;
int i;
intel_atomic_commit_fence_wait(intel_state);
@@ -12944,7 +12991,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_dependencies(state);
if (intel_state->modeset)
- intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
@@ -13081,7 +13128,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* the culprit.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
/*
@@ -13559,8 +13606,8 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (!modeset &&
(intel_cstate->base.color_mgmt_changed ||
intel_cstate->update_pipe)) {
- intel_color_set_csc(&intel_cstate->base);
- intel_color_load_luts(&intel_cstate->base);
+ intel_color_set_csc(intel_cstate);
+ intel_color_load_luts(intel_cstate);
}
/* Perform vblank evasion around commit operation */
@@ -13585,7 +13632,7 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- if (!IS_GEN2(dev_priv))
+ if (!IS_GEN(dev_priv, 2))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
if (crtc_state->has_pch_encoder) {
@@ -13709,8 +13756,8 @@ intel_legacy_cursor_update(struct drm_plane *plane,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
@@ -14047,7 +14094,7 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
int i;
- crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe];
+ crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
if (!crtc->num_scalers)
return;
@@ -14133,7 +14180,7 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
- intel_color_init(&intel_crtc->base);
+ intel_color_init(intel_crtc);
WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -14192,7 +14239,7 @@ static bool has_edp_a(struct drm_i915_private *dev_priv)
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
@@ -14285,7 +14332,13 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_C);
intel_ddi_init(dev_priv, PORT_D);
intel_ddi_init(dev_priv, PORT_E);
- intel_ddi_init(dev_priv, PORT_F);
+ /*
+ * On some ICL SKUs port F is not present. No strap bits for
+ * this, so rely on VBT.
+ */
+ if (intel_bios_is_port_present(dev_priv, PORT_F))
+ intel_ddi_init(dev_priv, PORT_F);
+
icl_dsi_init(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
/*
@@ -14327,9 +14380,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
if (IS_GEN9_BC(dev_priv) &&
- (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
- dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
- dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
+ intel_bios_is_port_present(dev_priv, PORT_E))
intel_ddi_init(dev_priv, PORT_E);
} else if (HAS_PCH_SPLIT(dev_priv)) {
@@ -14404,7 +14455,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
}
vlv_dsi_init(dev_priv);
- } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) {
+ } else if (!IS_GEN(dev_priv, 2) && !IS_PINEVIEW(dev_priv)) {
bool found = false;
if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
@@ -14438,7 +14489,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
intel_dp_init(dev_priv, DP_D, PORT_D);
- } else if (IS_GEN2(dev_priv))
+ } else if (IS_GEN(dev_priv, 2))
intel_dvo_init(dev_priv);
if (SUPPORTS_TV(dev_priv))
@@ -14636,7 +14687,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* require the entire fb to accommodate that to avoid
* potential runtime errors at plane configuration time.
*/
- if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 &&
+ if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
is_ccs_modifier(fb->modifier))
stride_alignment *= 4;
@@ -14841,7 +14892,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
- } else if (!IS_GEN2(dev_priv)) {
+ } else if (!IS_GEN(dev_priv, 2)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
dev_priv->display.get_initial_plane_config =
i9xx_get_initial_plane_config;
@@ -14857,9 +14908,9 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- } else if (IS_GEN6(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 6)) {
dev_priv->display.fdi_link_train = gen6_fdi_link_train;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
@@ -14991,12 +15042,12 @@ fail:
static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
u32 fdi_pll_clk =
I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
dev_priv->fdi_pll_freq = 270000;
} else {
return;
@@ -15112,10 +15163,10 @@ int intel_modeset_init(struct drm_device *dev)
}
/* maximum framebuffer dimensions */
- if (IS_GEN2(dev_priv)) {
+ if (IS_GEN(dev_priv, 2)) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
@@ -15126,7 +15177,7 @@ int intel_modeset_init(struct drm_device *dev)
if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dev->mode_config.cursor_height = 1023;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
dev->mode_config.cursor_width = 64;
dev->mode_config.cursor_height = 64;
} else {
@@ -15483,19 +15534,25 @@ void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
void i915_redisable_vga(struct drm_i915_private *dev_priv)
{
- /* This function can be called both from intel_modeset_setup_hw_state or
+ intel_wakeref_t wakeref;
+
+ /*
+ * This function can be called both from intel_modeset_setup_hw_state or
* at a very early point in our resume sequence, where the power well
* structures are not yet restored. Since this function is at a very
* paranoid "someone might have enabled VGA while we were not looking"
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
- * the rest of the driver uses. */
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
+ * the rest of the driver uses.
+ */
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_VGA);
+ if (!wakeref)
return;
i915_redisable_vga_power_on(dev_priv);
- intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
}
/* FIXME read out full plane state for all planes */
@@ -15795,12 +15852,13 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
struct intel_encoder *encoder;
+ struct intel_crtc *crtc;
+ intel_wakeref_t wakeref;
int i;
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
intel_early_display_was(dev_priv);
intel_modeset_readout_hw_state(dev);
@@ -15850,15 +15908,15 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
}
if (IS_G4X(dev_priv)) {
- g4x_wm_get_hw_state(dev);
+ g4x_wm_get_hw_state(dev_priv);
g4x_wm_sanitize(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- vlv_wm_get_hw_state(dev);
+ vlv_wm_get_hw_state(dev_priv);
vlv_wm_sanitize(dev_priv);
} else if (INTEL_GEN(dev_priv) >= 9) {
- skl_wm_get_hw_state(dev);
+ skl_wm_get_hw_state(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
- ilk_wm_get_hw_state(dev);
+ ilk_wm_get_hw_state(dev_priv);
}
for_each_intel_crtc(dev, crtc) {
@@ -15870,7 +15928,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
modeset_put_power_domains(dev_priv, put_domains);
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
intel_fbc_init_pipe_state(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 4262452963b3..c7c068662288 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -121,7 +121,7 @@ enum i9xx_plane_id {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
/*
* Per-pipe plane identifier.
@@ -311,12 +311,12 @@ struct intel_link_m_n {
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
for ((__p) = 0; \
- (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p) < RUNTIME_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
(__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
for ((__s) = 0; \
- (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s) < RUNTIME_INFO(__dev_priv)->num_sprites[(__p)]; \
(__s)++)
#define for_each_port_masked(__port, __ports_mask) \
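Note: the INTEL_INFO() -> RUNTIME_INFO() conversions here and in intel_display.c reflect splitting probed, mutable fields (num_sprites, num_scalers, ...) out of the const device info into a runtime info struct. A stand-alone sketch of that split, with invented stand-in types:

/* Illustration only; fake_* types and FAKE_* macros are made up for this sketch. */
#include <stdio.h>

struct fake_device_info  { int gen; };			/* immutable, per PCI id */
struct fake_runtime_info { int num_sprites[3]; };	/* filled in at probe time */

struct fake_i915 {
	const struct fake_device_info *info;
	struct fake_runtime_info runtime;
};

#define FAKE_INTEL_INFO(i915)	((i915)->info)
#define FAKE_RUNTIME_INFO(i915)	(&(i915)->runtime)

int main(void)
{
	static const struct fake_device_info skl_info = { .gen = 9 };
	struct fake_i915 dev = { .info = &skl_info };

	FAKE_RUNTIME_INFO(&dev)->num_sprites[0] = 3;	/* probed, not static */

	printf("gen %d, pipe A sprites %d\n",
	       FAKE_INTEL_INFO(&dev)->gen,
	       FAKE_RUNTIME_INFO(&dev)->num_sprites[0]);
	return 0;
}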
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d18b72b5f0b8..f7d5314e3395 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,7 +32,6 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
@@ -304,9 +303,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
- if (port == PORT_B)
+ if (intel_port_is_combophy(dev_priv, port) &&
+ !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -344,7 +345,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
size = ARRAY_SIZE(cnl_rates);
- if (IS_GEN10(dev_priv))
+ if (IS_GEN(dev_priv, 10))
max_rate = cnl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
@@ -428,7 +429,7 @@ static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
}
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
- uint8_t lane_count)
+ u8 lane_count)
{
/*
* FIXME: we need to synchronize the current link parameters with
@@ -448,7 +449,7 @@ static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
int link_rate,
- uint8_t lane_count)
+ u8 lane_count)
{
const struct drm_display_mode *fixed_mode =
intel_dp->attached_connector->panel.fixed_mode;
@@ -463,7 +464,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
}
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count)
+ int link_rate, u8 lane_count)
{
int index;
@@ -571,19 +572,19 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
-uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
{
- int i;
- uint32_t v = 0;
+ int i;
+ u32 v = 0;
if (src_bytes > 4)
src_bytes = 4;
for (i = 0; i < src_bytes; i++)
- v |= ((uint32_t) src[i]) << ((3-i) * 8);
+ v |= ((u32)src[i]) << ((3 - i) * 8);
return v;
}
-static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
+static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
{
int i;
if (dst_bytes > 4)
@@ -600,30 +601,39 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
-static void pps_lock(struct intel_dp *intel_dp)
+static intel_wakeref_t
+pps_lock(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
/*
* See intel_power_sequencer_reset() why we need
* a power domain reference here.
*/
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
+ wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)));
mutex_lock(&dev_priv->pps_mutex);
+
+ return wakeref;
}
-static void pps_unlock(struct intel_dp *intel_dp)
+static intel_wakeref_t
+pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
mutex_unlock(&dev_priv->pps_mutex);
-
intel_display_power_put(dev_priv,
- intel_aux_power_domain(dp_to_dig_port(intel_dp)));
+ intel_aux_power_domain(dp_to_dig_port(intel_dp)),
+ wakeref);
+ return 0;
}
+#define with_pps_lock(dp, wf) \
+ for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
+
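Note: the with_pps_lock() macro above wraps a block in pps_lock()/pps_unlock() via a for loop: pps_lock() returns the (non-zero) wakeref, and pps_unlock() returns 0, so the body runs exactly once and the unlock always happens when the block exits normally (as with similar kernel scoped-lock macros, a break out of the body would skip the unlock). A self-contained model of the idiom, with stand-in lock functions:

/* Illustration only; fake_lock()/fake_unlock() are invented stand-ins. */
#include <stdio.h>

typedef unsigned long wakeref_t;

static wakeref_t fake_lock(void)
{
	printf("lock taken, power reference held\n");
	return 1;			/* non-zero: enter the loop body */
}

static wakeref_t fake_unlock(wakeref_t wf)
{
	printf("lock and power reference %lu released\n", wf);
	return 0;			/* zero: terminate the loop */
}

#define with_fake_lock(wf) \
	for ((wf) = fake_lock(); (wf); (wf) = fake_unlock(wf))

int main(void)
{
	wakeref_t wakeref;

	with_fake_lock(wakeref)
		printf("critical section runs once with wakeref %lu\n", wakeref);

	return 0;
}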
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
@@ -633,7 +643,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
bool pll_enabled, release_cl_override = false;
enum dpio_phy phy = DPIO_PHY(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
- uint32_t DP;
+ u32 DP;
if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
"skipping pipe %c power sequencer kick due to port %c being active\n",
@@ -972,30 +982,29 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
edp_notifier);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
- pps_lock(intel_dp);
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
- i915_reg_t pp_ctrl_reg, pp_div_reg;
- u32 pp_div;
-
- pp_ctrl_reg = PP_CONTROL(pipe);
- pp_div_reg = PP_DIVISOR(pipe);
- pp_div = I915_READ(pp_div_reg);
- pp_div &= PP_REFERENCE_DIVIDER_MASK;
-
- /* 0x1F write to PP_DIV_REG sets max cycle delay */
- I915_WRITE(pp_div_reg, pp_div | 0x1F);
- I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
- msleep(intel_dp->panel_power_cycle_delay);
+ with_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+ i915_reg_t pp_ctrl_reg, pp_div_reg;
+ u32 pp_div;
+
+ pp_ctrl_reg = PP_CONTROL(pipe);
+ pp_div_reg = PP_DIVISOR(pipe);
+ pp_div = I915_READ(pp_div_reg);
+ pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+ /* 0x1F write to PP_DIV_REG sets max cycle delay */
+ I915_WRITE(pp_div_reg, pp_div | 0x1F);
+ I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
+ msleep(intel_dp->panel_power_cycle_delay);
+ }
}
- pps_unlock(intel_dp);
-
return 0;
}
@@ -1041,12 +1050,12 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
}
}
-static uint32_t
+static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
- uint32_t status;
+ u32 status;
bool done;
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
@@ -1059,7 +1068,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
return status;
}
-static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1073,7 +1082,7 @@ static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
-static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1092,7 +1101,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
-static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -1109,7 +1118,7 @@ static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return ilk_get_aux_clock_divider(intel_dp, index);
}
-static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
+static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
/*
* SKL doesn't need us to program the AUX clock divider (Hardware will
@@ -1119,16 +1128,16 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
return index ? 0 : 1;
}
-static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- uint32_t aux_clock_divider)
+static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 aux_clock_divider)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
- uint32_t precharge, timeout;
+ u32 precharge, timeout;
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
precharge = 3;
else
precharge = 5;
@@ -1149,12 +1158,12 @@ static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
(aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
-static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
- int send_bytes,
- uint32_t unused)
+static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
+ int send_bytes,
+ u32 unused)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- uint32_t ret;
+ u32 ret;
ret = DP_AUX_CH_CTL_SEND_BUSY |
DP_AUX_CH_CTL_DONE |
@@ -1174,25 +1183,26 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
- const uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size,
+ const u8 *send, int send_bytes,
+ u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
to_i915(intel_dig_port->base.base.dev);
i915_reg_t ch_ctl, ch_data[5];
- uint32_t aux_clock_divider;
+ u32 aux_clock_divider;
+ intel_wakeref_t wakeref;
int i, ret, recv_bytes;
- uint32_t status;
int try, clock = 0;
+ u32 status;
bool vdd;
ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
for (i = 0; i < ARRAY_SIZE(ch_data); i++)
ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);
- pps_lock(intel_dp);
+ wakeref = pps_lock(intel_dp);
/*
* We will be called with VDD already enabled for dpcd/edid/oui reads.
@@ -1336,7 +1346,7 @@ out:
if (vdd)
edp_panel_vdd_off(intel_dp, false);
- pps_unlock(intel_dp);
+ pps_unlock(intel_dp, wakeref);
return ret;
}
@@ -1358,7 +1368,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- uint8_t txbuf[20], rxbuf[20];
+ u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
int ret;
@@ -1691,7 +1701,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
}
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
- uint8_t *link_bw, uint8_t *rate_select)
+ u8 *link_bw, u8 *rate_select)
{
/* eDP 1.4 rate select method. */
if (intel_dp->use_rate_select) {
@@ -2055,7 +2065,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
&limits);
/* enable compression if the mode doesn't fit available BW */
- if (ret) {
+ DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
+ if (ret || intel_dp->force_dsc_en) {
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits);
if (ret < 0)
@@ -2205,7 +2216,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
}
void intel_dp_set_link_params(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count,
+ int link_rate, u8 lane_count,
bool link_mst)
{
intel_dp->link_trained = false;
@@ -2467,15 +2478,15 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
*/
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
+ intel_wakeref_t wakeref;
bool vdd;
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
- vdd = edp_panel_vdd_on(intel_dp);
- pps_unlock(intel_dp);
-
+ vdd = false;
+ with_pps_lock(intel_dp, wakeref)
+ vdd = edp_panel_vdd_on(intel_dp);
I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
port_name(dp_to_dig_port(intel_dp)->base.port));
}
@@ -2514,19 +2525,21 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
- intel_display_power_put(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_display_power_put_unchecked(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
}
static void edp_panel_vdd_work(struct work_struct *__work)
{
- struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
- struct intel_dp, panel_vdd_work);
+ struct intel_dp *intel_dp =
+ container_of(to_delayed_work(__work),
+ struct intel_dp, panel_vdd_work);
+ intel_wakeref_t wakeref;
- pps_lock(intel_dp);
- if (!intel_dp->want_panel_vdd)
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ if (!intel_dp->want_panel_vdd)
+ edp_panel_vdd_off_sync(intel_dp);
+ }
}
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
@@ -2590,7 +2603,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp = ironlake_get_pp_control(intel_dp);
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2598,7 +2611,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
}
pp |= PANEL_POWER_ON;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
pp |= PANEL_POWER_RESET;
I915_WRITE(pp_ctrl_reg, pp);
@@ -2607,7 +2620,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
- if (IS_GEN5(dev_priv)) {
+ if (IS_GEN(dev_priv, 5)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
@@ -2616,12 +2629,13 @@ static void edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
+ intel_wakeref_t wakeref;
+
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
- edp_panel_on(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_on(intel_dp);
}
@@ -2660,25 +2674,25 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->panel_power_off_time = ktime_get_boottime();
/* We got a reference when we enabled the VDD. */
- intel_display_power_put(dev_priv, intel_aux_power_domain(dig_port));
+ intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
+ intel_wakeref_t wakeref;
+
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
- edp_panel_off(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_off(intel_dp);
}
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
+ intel_wakeref_t wakeref;
/*
* If we enable the backlight right away following a panel power
@@ -2688,17 +2702,16 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
*/
wait_backlight_on(intel_dp);
- pps_lock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
- pp |= EDP_BLC_ENABLE;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ironlake_get_pp_control(intel_dp);
+ pp |= EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
-
- pps_unlock(intel_dp);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ }
}
/* Enable backlight PWM and backlight PP control. */
@@ -2720,23 +2733,21 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp;
- i915_reg_t pp_ctrl_reg;
+ intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
- pps_lock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ u32 pp;
- pp = ironlake_get_pp_control(intel_dp);
- pp &= ~EDP_BLC_ENABLE;
-
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+ pp = ironlake_get_pp_control(intel_dp);
+ pp &= ~EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
-
- pps_unlock(intel_dp);
+ I915_WRITE(pp_ctrl_reg, pp);
+ POSTING_READ(pp_ctrl_reg);
+ }
intel_dp->last_backlight_off = jiffies;
edp_wait_backlight_off(intel_dp);
@@ -2764,12 +2775,12 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
bool enable)
{
struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
+ intel_wakeref_t wakeref;
bool is_enabled;
- pps_lock(intel_dp);
- is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
- pps_unlock(intel_dp);
-
+ is_enabled = false;
+ with_pps_lock(intel_dp, wakeref)
+ is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
if (is_enabled == enable)
return;
@@ -2836,7 +2847,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
* 1. Wait for the start of vertical blank on the enabled pipe going to FDI
* 2. Program DP PLL enable
*/
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
intel_dp->DP |= DP_PLL_ENABLE;
@@ -2986,16 +2997,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
encoder->port, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
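
This hunk shows the recurring power-domain conversion in the series: intel_display_power_get_if_enabled() now returns an intel_wakeref_t cookie, and the matching intel_display_power_put() takes that cookie back, while intel_display_power_put_unchecked() keeps the old untracked behaviour for callers that cannot carry it. A condensed sketch of the contract, with a hypothetical helper name:

/* Sketch of the tracked get/put contract used throughout this diff. */
static bool example_get_hw_state(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
	if (!wakeref)
		return false;	/* power well is off, nothing to read */

	ret = true;		/* ...read the hardware state here... */

	/* The cookie returned by get must travel to the matching put. */
	intel_display_power_put(dev_priv, domain, wakeref);
	return ret;
}
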
@@ -3163,20 +3176,20 @@ static void chv_post_disable_dp(struct intel_encoder *encoder,
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
- uint32_t *DP,
- uint8_t dp_train_pat)
+ u32 *DP,
+ u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
- uint8_t train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
+ u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
if (dp_train_pat & train_pat_mask)
DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- uint32_t temp = I915_READ(DP_TP_CTL(port));
+ u32 temp = I915_READ(DP_TP_CTL(port));
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3275,24 +3288,23 @@ static void intel_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
- uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ u32 dp_reg = I915_READ(intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
- pps_lock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ vlv_init_panel_power_sequencer(encoder, pipe_config);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- vlv_init_panel_power_sequencer(encoder, pipe_config);
+ intel_dp_enable_port(intel_dp, pipe_config);
- intel_dp_enable_port(intel_dp, pipe_config);
-
- edp_panel_vdd_on(intel_dp);
- edp_panel_on(intel_dp);
- edp_panel_vdd_off(intel_dp, true);
-
- pps_unlock(intel_dp);
+ edp_panel_vdd_on(intel_dp);
+ edp_panel_on(intel_dp);
+ edp_panel_vdd_off(intel_dp, true);
+ }
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
unsigned int lane_mask = 0x0;
@@ -3495,14 +3507,14 @@ static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
* link status information
*/
bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
/* These are source-specific values. */
-uint8_t
+u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -3521,8 +3533,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
-uint8_t
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
@@ -3567,12 +3579,12 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
}
}
-static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
+static u32 vlv_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
- uint8_t train_set = intel_dp->train_set[0];
+ u8 train_set = intel_dp->train_set[0];
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3653,12 +3665,12 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
-static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
+static u32 chv_signal_levels(struct intel_dp *intel_dp)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u32 deemph_reg_value, margin_reg_value;
bool uniq_trans_scale = false;
- uint8_t train_set = intel_dp->train_set[0];
+ u8 train_set = intel_dp->train_set[0];
switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3736,10 +3748,10 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
-static uint32_t
-g4x_signal_levels(uint8_t train_set)
+static u32
+g4x_signal_levels(u8 train_set)
{
- uint32_t signal_levels = 0;
+ u32 signal_levels = 0;
switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
@@ -3775,8 +3787,8 @@ g4x_signal_levels(uint8_t train_set)
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
-static uint32_t
-snb_cpu_edp_signal_levels(uint8_t train_set)
+static u32
+snb_cpu_edp_signal_levels(u8 train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3803,8 +3815,8 @@ snb_cpu_edp_signal_levels(uint8_t train_set)
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
-static uint32_t
-ivb_cpu_edp_signal_levels(uint8_t train_set)
+static u32
+ivb_cpu_edp_signal_levels(u8 train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -3839,8 +3851,8 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
- uint32_t signal_levels, mask = 0;
- uint8_t train_set = intel_dp->train_set[0];
+ u32 signal_levels, mask = 0;
+ u8 train_set = intel_dp->train_set[0];
if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
signal_levels = bxt_signal_levels(intel_dp);
@@ -3854,7 +3866,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
signal_levels = ivb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
- } else if (IS_GEN6(dev_priv) && port == PORT_A) {
+ } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
signal_levels = snb_cpu_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
@@ -3879,7 +3891,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- uint8_t dp_train_pat)
+ u8 dp_train_pat)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv =
@@ -3896,7 +3908,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum port port = intel_dig_port->base.port;
- uint32_t val;
+ u32 val;
if (!HAS_DDI(dev_priv))
return;
@@ -3931,7 +3943,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
enum port port = encoder->port;
- uint32_t DP = intel_dp->DP;
+ u32 DP = intel_dp->DP;
if (WARN_ON(HAS_DDI(dev_priv)))
return;
@@ -3990,12 +4002,49 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP = DP;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- pps_lock(intel_dp);
- intel_dp->active_pipe = INVALID_PIPE;
- pps_unlock(intel_dp);
+ intel_wakeref_t wakeref;
+
+ with_pps_lock(intel_dp, wakeref)
+ intel_dp->active_pipe = INVALID_PIPE;
}
}
+static void
+intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
+{
+ u8 dpcd_ext[6];
+
+ /*
+ * Prior to DP1.3 the bit represented by
+ * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+ * If it is set, DP_DPCD_REV at 0000h could be at a value less than
+ * the true capability of the panel. The only way to check is to
+ * compare 0000h and 2200h.
+ */
+ if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+ return;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
+ &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
+ DRM_ERROR("DPCD failed read at extended capabilities\n");
+ return;
+ }
+
+ if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+ DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
+ return;
+ }
+
+ if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
+ return;
+
+ DRM_DEBUG_KMS("Base DPCD: %*ph\n",
+ (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
+
+ memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
+}
+
bool
intel_dp_read_dpcd(struct intel_dp *intel_dp)
{
@@ -4003,6 +4052,8 @@ intel_dp_read_dpcd(struct intel_dp *intel_dp)
sizeof(intel_dp->dpcd)) < 0)
return false; /* aux transfer failed */
+ intel_dp_extended_receiver_capabilities(intel_dp);
+
DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
return intel_dp->dpcd[DP_DPCD_REV] != 0;
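
intel_dp_read_dpcd() now calls the new intel_dp_extended_receiver_capabilities() helper, so DP 1.3+ sinks that keep a lower, compatibility-oriented revision at DPCD 0000h are read through the extended receiver capability field at 2200h instead. An illustrative one-byte check of the same idea (not part of the patch):

/* Illustrative only: compare the extended DPCD rev (2200h) with 0000h. */
u8 ext_rev;

if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DP13_DPCD_REV, &ext_rev) == 1 &&
    ext_rev > intel_dp->dpcd[DP_DPCD_REV])
	DRM_DEBUG_KMS("Sink advertises DPCD r%d.%d at 2200h, r%d.%d at 0000h\n",
		      ext_rev >> 4, ext_rev & 0xf,
		      intel_dp->dpcd[DP_DPCD_REV] >> 4,
		      intel_dp->dpcd[DP_DPCD_REV] & 0xf);
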
@@ -4233,7 +4284,7 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
DP_DPRX_ESI_LEN;
}
-u16 intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
int mode_clock, int mode_hdisplay)
{
u16 bits_per_pixel, max_bpp_small_joiner_ram;
@@ -4300,7 +4351,7 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
/* Also take into account max slice width */
- min_slice_count = min_t(uint8_t, min_slice_count,
+ min_slice_count = min_t(u8, min_slice_count,
DIV_ROUND_UP(mode_hdisplay,
max_slice_width));
@@ -4318,11 +4369,11 @@ u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
return 0;
}
-static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
int status = 0;
int test_link_rate;
- uint8_t test_lane_count, test_link_bw;
+ u8 test_lane_count, test_link_bw;
/* (DP CTS 1.2)
* 4.3.1.11
*/
@@ -4355,10 +4406,10 @@ static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
return DP_TEST_ACK;
}
-static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
- uint8_t test_pattern;
- uint8_t test_misc;
+ u8 test_pattern;
+ u8 test_misc;
__be16 h_width, v_height;
int status = 0;
@@ -4416,9 +4467,9 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
return DP_TEST_ACK;
}
-static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_ACK;
+ u8 test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@@ -4460,16 +4511,16 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
return test_result;
}
-static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
+static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_NAK;
+ u8 test_result = DP_TEST_NAK;
return test_result;
}
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
- uint8_t response = DP_TEST_NAK;
- uint8_t request = 0;
+ u8 response = DP_TEST_NAK;
+ u8 request = 0;
int status;
status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
@@ -4795,8 +4846,8 @@ static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
- uint8_t *dpcd = intel_dp->dpcd;
- uint8_t type;
+ u8 *dpcd = intel_dp->dpcd;
+ u8 type;
if (lspcon->active)
lspcon_resume(lspcon);
@@ -5033,28 +5084,38 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
}
+static const char *tc_type_name(enum tc_port_type type)
+{
+ static const char * const names[] = {
+ [TC_PORT_UNKNOWN] = "unknown",
+ [TC_PORT_LEGACY] = "legacy",
+ [TC_PORT_TYPEC] = "typec",
+ [TC_PORT_TBT] = "tbt",
+ };
+
+ if (WARN_ON(type >= ARRAY_SIZE(names)))
+ type = TC_PORT_UNKNOWN;
+
+ return names[type];
+}
+
static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
struct intel_digital_port *intel_dig_port,
bool is_legacy, bool is_typec, bool is_tbt)
{
enum port port = intel_dig_port->base.port;
enum tc_port_type old_type = intel_dig_port->tc_type;
- const char *type_str;
WARN_ON(is_legacy + is_typec + is_tbt != 1);
- if (is_legacy) {
+ if (is_legacy)
intel_dig_port->tc_type = TC_PORT_LEGACY;
- type_str = "legacy";
- } else if (is_typec) {
+ else if (is_typec)
intel_dig_port->tc_type = TC_PORT_TYPEC;
- type_str = "typec";
- } else if (is_tbt) {
+ else if (is_tbt)
intel_dig_port->tc_type = TC_PORT_TBT;
- type_str = "tbt";
- } else {
+ else
return;
- }
/* Types are not supposed to be changed at runtime. */
WARN_ON(old_type != TC_PORT_UNKNOWN &&
@@ -5062,12 +5123,9 @@ static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
if (old_type != intel_dig_port->tc_type)
DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
- type_str);
+ tc_type_name(intel_dig_port->tc_type));
}
-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
-
/*
* This function implements the first part of the Connect Flow described by our
* specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
@@ -5102,6 +5160,7 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
val = I915_READ(PORT_TX_DFLEXDPPMS);
if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
+ WARN_ON(dig_port->tc_legacy_port);
return false;
}
@@ -5133,8 +5192,8 @@ static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
* See the comment at the connect function. This implements the Disconnect
* Flow.
*/
-static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port)
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port)
{
enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
@@ -5154,6 +5213,10 @@ static void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
}
+ DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
+ port_name(dig_port->base.port),
+ tc_type_name(dig_port->tc_type));
+
dig_port->tc_type = TC_PORT_UNKNOWN;
}
@@ -5175,7 +5238,14 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
bool is_legacy, is_typec, is_tbt;
u32 dpsp;
- is_legacy = I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port);
+ /*
+ * WARN if we got a legacy port HPD, but VBT didn't mark the port as
+ * legacy. Treat the port as legacy from now on.
+ */
+ if (WARN_ON(!intel_dig_port->tc_legacy_port &&
+ I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
+ intel_dig_port->tc_legacy_port = true;
+ is_legacy = intel_dig_port->tc_legacy_port;
/*
* The spec says we shouldn't be using the ISR bits for detecting
@@ -5187,6 +5257,7 @@ static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
if (!is_legacy && !is_typec && !is_tbt) {
icl_tc_phy_disconnect(dev_priv, intel_dig_port);
+
return false;
}
@@ -5238,17 +5309,17 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 11)
return icl_digital_port_connected(encoder);
- else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv))
+ else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
return spt_digital_port_connected(encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_digital_port_connected(encoder);
- else if (IS_GEN8(dev_priv))
+ else if (IS_GEN(dev_priv, 8))
return bdw_digital_port_connected(encoder);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
return ivb_digital_port_connected(encoder);
- else if (IS_GEN6(dev_priv))
+ else if (IS_GEN(dev_priv, 6))
return snb_digital_port_connected(encoder);
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
return ilk_digital_port_connected(encoder);
MISSING_CASE(INTEL_GEN(dev_priv));
@@ -5310,12 +5381,13 @@ intel_dp_detect(struct drm_connector *connector,
enum drm_connector_status status;
enum intel_display_power_domain aux_domain =
intel_aux_power_domain(dig_port);
+ intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
- intel_display_power_get(dev_priv, aux_domain);
+ wakeref = intel_display_power_get(dev_priv, aux_domain);
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@@ -5381,7 +5453,7 @@ intel_dp_detect(struct drm_connector *connector,
ret = intel_dp_retrain_link(encoder, ctx);
if (ret) {
- intel_display_power_put(dev_priv, aux_domain);
+ intel_display_power_put(dev_priv, aux_domain, wakeref);
return ret;
}
}
@@ -5405,7 +5477,7 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(dev_priv, aux_domain);
+ intel_display_power_put(dev_priv, aux_domain, wakeref);
return status;
}
@@ -5418,6 +5490,7 @@ intel_dp_force(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
enum intel_display_power_domain aux_domain =
intel_aux_power_domain(dig_port);
+ intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -5426,11 +5499,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- intel_display_power_get(dev_priv, aux_domain);
+ wakeref = intel_display_power_get(dev_priv, aux_domain);
intel_dp_set_edid(intel_dp);
- intel_display_power_put(dev_priv, aux_domain);
+ intel_display_power_put(dev_priv, aux_domain, wakeref);
}
static int intel_dp_get_modes(struct drm_connector *connector)
@@ -5495,21 +5568,22 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
-void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
if (intel_dp_is_edp(intel_dp)) {
+ intel_wakeref_t wakeref;
+
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
if (intel_dp->edp_notifier.notifier_call) {
unregister_reboot_notifier(&intel_dp->edp_notifier);
@@ -5518,14 +5592,20 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
}
intel_dp_aux_fini(intel_dp);
+}
+
+static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+{
+ intel_dp_encoder_flush_work(encoder);
drm_encoder_cleanup(encoder);
- kfree(intel_dig_port);
+ kfree(enc_to_dig_port(encoder));
}
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+ intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
return;
@@ -5535,9 +5615,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
* Make sure vdd is actually turned off here.
*/
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
}
static
@@ -5550,7 +5629,7 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
.address = DP_AUX_HDCP_AKSV,
.size = DRM_HDCP_KSV_LEN,
};
- uint8_t txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
+ u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
ssize_t dpcd_ret;
int ret;
@@ -5583,7 +5662,12 @@ int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
}
reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
- return reply == DP_AUX_NATIVE_REPLY_ACK ? 0 : -EIO;
+ if (reply != DP_AUX_NATIVE_REPLY_ACK) {
+ DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
+ reply);
+ return -EIO;
+ }
+ return 0;
}
static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
@@ -5813,6 +5897,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
+ intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5822,18 +5907,19 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_dp->reset_link_params = true;
- pps_lock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ intel_dp->active_pipe = vlv_active_pipe(intel_dp);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- intel_dp->active_pipe = vlv_active_pipe(intel_dp);
-
- if (intel_dp_is_edp(intel_dp)) {
- /* Reinit the power sequencer, in case BIOS did something with it. */
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
+ if (intel_dp_is_edp(intel_dp)) {
+ /*
+ * Reinit the power sequencer, in case BIOS did
+ * something nasty with it.
+ */
+ intel_dp_pps_init(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ }
}
-
- pps_unlock(intel_dp);
}
static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -5866,6 +5952,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum irqreturn ret = IRQ_NONE;
+ intel_wakeref_t wakeref;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@@ -5888,8 +5975,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
return IRQ_NONE;
}
- intel_display_power_get(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ wakeref = intel_display_power_get(dev_priv,
+ intel_aux_power_domain(intel_dig_port));
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -5919,7 +6006,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
put_power:
intel_display_power_put(dev_priv,
- intel_aux_power_domain(intel_dig_port));
+ intel_aux_power_domain(intel_dig_port),
+ wakeref);
return ret;
}
@@ -6366,8 +6454,8 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
}
mutex_lock(&dev_priv->drrs.mutex);
- if (WARN_ON(dev_priv->drrs.dp)) {
- DRM_ERROR("DRRS already enabled\n");
+ if (dev_priv->drrs.dp) {
+ DRM_DEBUG_KMS("DRRS already enabled\n");
goto unlock;
}
@@ -6627,8 +6715,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
struct drm_display_mode *scan;
- struct edid *edid;
enum pipe pipe = INVALID_PIPE;
+ intel_wakeref_t wakeref;
+ struct edid *edid;
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -6648,13 +6737,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- pps_lock(intel_dp);
-
- intel_dp_init_panel_power_timestamps(intel_dp);
- intel_dp_pps_init(intel_dp);
- intel_edp_panel_vdd_sanitize(intel_dp);
-
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref) {
+ intel_dp_init_panel_power_timestamps(intel_dp);
+ intel_dp_pps_init(intel_dp);
+ intel_edp_panel_vdd_sanitize(intel_dp);
+ }
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -6739,9 +6826,8 @@ out_vdd_off:
* vdd might still be enabled due to the delayed vdd off.
* Make sure vdd is actually turned off here.
*/
- pps_lock(intel_dp);
- edp_panel_vdd_off_sync(intel_dp);
- pps_unlock(intel_dp);
+ with_pps_lock(intel_dp, wakeref)
+ edp_panel_vdd_off_sync(intel_dp);
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 30be0e39bd5f..b59c87daa4f7 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -24,7 +24,7 @@
#include "intel_drv.h"
static void
-intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
+intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
{
DRM_DEBUG_KMS("ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x",
@@ -34,17 +34,17 @@ intel_dp_dump_link_status(const uint8_t link_status[DP_LINK_STATUS_SIZE])
static void
intel_get_adjust_train(struct intel_dp *intel_dp,
- const uint8_t link_status[DP_LINK_STATUS_SIZE])
+ const u8 link_status[DP_LINK_STATUS_SIZE])
{
- uint8_t v = 0;
- uint8_t p = 0;
+ u8 v = 0;
+ u8 p = 0;
int lane;
- uint8_t voltage_max;
- uint8_t preemph_max;
+ u8 voltage_max;
+ u8 preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
- uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
- uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+ u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
if (this_v > v)
v = this_v;
@@ -66,9 +66,9 @@ intel_get_adjust_train(struct intel_dp *intel_dp,
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
- uint8_t dp_train_pat)
+ u8 dp_train_pat)
{
- uint8_t buf[sizeof(intel_dp->train_set) + 1];
+ u8 buf[sizeof(intel_dp->train_set) + 1];
int ret, len;
intel_dp_program_link_training_pattern(intel_dp, dp_train_pat);
@@ -92,7 +92,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp,
- uint8_t dp_train_pat)
+ u8 dp_train_pat)
{
memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp);
@@ -128,11 +128,11 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
- uint8_t voltage;
+ u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
- uint8_t link_config[2];
- uint8_t link_bw, rate_select;
+ u8 link_config[2];
+ u8 link_bw, rate_select;
if (intel_dp->prepare_link_retrain)
intel_dp->prepare_link_retrain(intel_dp);
@@ -186,7 +186,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ u8 link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
@@ -282,7 +282,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
int tries;
u32 training_pattern;
- uint8_t link_status[DP_LINK_STATUS_SIZE];
+ u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
training_pattern = intel_dp_training_pattern(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 5899debe2184..f90041cb336d 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -23,7 +23,6 @@
*
*/
-#include <drm/drmP.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
@@ -248,7 +247,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
int ret;
- uint32_t temp;
+ u32 temp;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 3c7f10d17658..95cb8b154f87 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -413,7 +413,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
}
if (phy_info->rcomp_phy != -1) {
- uint32_t grc_code;
+ u32 grc_code;
bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
@@ -445,7 +445,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- uint32_t val;
+ u32 val;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -515,7 +515,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- uint32_t mask;
+ u32 mask;
bool ok;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -567,8 +567,8 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
#undef _CHK
}
-uint8_t
-bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
+u8
+bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count)
{
switch (lane_count) {
case 1:
@@ -585,7 +585,7 @@ bxt_ddi_phy_calc_lane_lat_optim_mask(uint8_t lane_count)
}
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
- uint8_t lane_lat_optim_mask)
+ u8 lane_lat_optim_mask)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
@@ -610,7 +610,7 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
}
}
-uint8_t
+u8
bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -618,7 +618,7 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
enum dpio_phy phy;
enum dpio_channel ch;
int lane;
- uint8_t mask;
+ u8 mask;
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
@@ -739,7 +739,7 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
- uint32_t val;
+ u32 val;
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
if (reset)
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index d513ca875c67..606f54dde086 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -345,9 +345,12 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(PCH_DPLL(id));
@@ -355,7 +358,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
hw_state->fp0 = I915_READ(PCH_FP0(id));
hw_state->fp1 = I915_READ(PCH_FP1(id));
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & DPLL_VCO_ENABLE;
}
@@ -487,7 +490,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
val = I915_READ(WRPLL_CTL(id));
I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
@@ -497,7 +500,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- uint32_t val;
+ u32 val;
val = I915_READ(SPLL_CTL);
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
@@ -509,15 +512,18 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(WRPLL_CTL(id));
hw_state->wrpll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & WRPLL_PLL_ENABLE;
}
@@ -526,15 +532,18 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return val & SPLL_PLL_ENABLE;
}
@@ -630,11 +639,12 @@ static unsigned hsw_wrpll_get_budget_for_freq(int clock)
return budget;
}
-static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget,
- unsigned r2, unsigned n2, unsigned p,
+static void hsw_wrpll_update_rnp(u64 freq2k, unsigned int budget,
+ unsigned int r2, unsigned int n2,
+ unsigned int p,
struct hsw_wrpll_rnp *best)
{
- uint64_t a, b, c, d, diff, diff_best;
+ u64 a, b, c, d, diff, diff_best;
/* No best (r,n,p) yet */
if (best->p == 0) {
@@ -693,7 +703,7 @@ static void
hsw_ddi_calculate_wrpll(int clock /* in Hz */,
unsigned *r2_out, unsigned *n2_out, unsigned *p_out)
{
- uint64_t freq2k;
+ u64 freq2k;
unsigned p, n2, r2;
struct hsw_wrpll_rnp best = { 0, 0, 0 };
unsigned budget;
@@ -759,7 +769,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll;
- uint32_t val;
+ u32 val;
unsigned int p, n2, r2;
hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
@@ -921,7 +931,7 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
val = I915_READ(DPLL_CTRL1);
@@ -986,12 +996,15 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- uint32_t val;
+ u32 val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -1011,7 +1024,7 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -1020,12 +1033,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state)
{
- uint32_t val;
const struct skl_dpll_regs *regs = skl_dpll_regs;
const enum intel_dpll_id id = pll->info->id;
+ intel_wakeref_t wakeref;
+ u32 val;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -1041,15 +1057,15 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
struct skl_wrpll_context {
- uint64_t min_deviation; /* current minimal deviation */
- uint64_t central_freq; /* chosen central freq */
- uint64_t dco_freq; /* chosen dco freq */
+ u64 min_deviation; /* current minimal deviation */
+ u64 central_freq; /* chosen central freq */
+ u64 dco_freq; /* chosen dco freq */
unsigned int p; /* chosen divider */
};
@@ -1065,11 +1081,11 @@ static void skl_wrpll_context_init(struct skl_wrpll_context *ctx)
#define SKL_DCO_MAX_NDEVIATION 600
static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx,
- uint64_t central_freq,
- uint64_t dco_freq,
+ u64 central_freq,
+ u64 dco_freq,
unsigned int divider)
{
- uint64_t deviation;
+ u64 deviation;
deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq),
central_freq);
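
The deviation computed above is in hundredths of a percent: deviation = 10000 * |dco_freq - central_freq| / central_freq. Worked example with illustrative numbers: central_freq = 8,400,000,000 Hz and a candidate dco_freq = 8,440,000,000 Hz give

	deviation = 10000 * 40,000,000 / 8,400,000,000 = 47	/* i.e. +0.47% */

which is then compared against the allowed positive deviation, or against SKL_DCO_MAX_NDEVIATION (600, i.e. 6%) when the DCO undershoots the central frequency.
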
@@ -1143,21 +1159,21 @@ static void skl_wrpll_get_multipliers(unsigned int p,
}
struct skl_wrpll_params {
- uint32_t dco_fraction;
- uint32_t dco_integer;
- uint32_t qdiv_ratio;
- uint32_t qdiv_mode;
- uint32_t kdiv;
- uint32_t pdiv;
- uint32_t central_freq;
+ u32 dco_fraction;
+ u32 dco_integer;
+ u32 qdiv_ratio;
+ u32 qdiv_mode;
+ u32 kdiv;
+ u32 pdiv;
+ u32 central_freq;
};
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
- uint64_t afe_clock,
- uint64_t central_freq,
- uint32_t p0, uint32_t p1, uint32_t p2)
+ u64 afe_clock,
+ u64 central_freq,
+ u32 p0, u32 p1, u32 p2)
{
- uint64_t dco_freq;
+ u64 dco_freq;
switch (central_freq) {
case 9600000000ULL:
@@ -1223,10 +1239,10 @@ static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
struct skl_wrpll_params *wrpll_params)
{
- uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
- uint64_t dco_central_freq[3] = {8400000000ULL,
- 9000000000ULL,
- 9600000000ULL};
+ u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ u64 dco_central_freq[3] = { 8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL };
static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
24, 28, 30, 32, 36, 40, 42, 44,
48, 52, 54, 56, 60, 64, 66, 68,
@@ -1250,7 +1266,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
for (i = 0; i < dividers[d].n_dividers; i++) {
unsigned int p = dividers[d].list[i];
- uint64_t dco_freq = p * afe_clock;
+ u64 dco_freq = p * afe_clock;
skl_wrpll_try_divider(&ctx,
dco_central_freq[dco],
@@ -1296,7 +1312,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
int clock)
{
- uint32_t ctrl1, cfgcr1, cfgcr2;
+ u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
/*
@@ -1333,7 +1349,7 @@ static bool
skl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
- uint32_t ctrl1;
+ u32 ctrl1;
/*
* See comment in intel_dpll_hw_state to understand why we always use 0
@@ -1435,7 +1451,7 @@ static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- uint32_t temp;
+ u32 temp;
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
enum dpio_phy phy;
enum dpio_channel ch;
@@ -1556,7 +1572,7 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- uint32_t temp;
+ u32 temp;
temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_ENABLE;
@@ -1579,14 +1595,17 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
- uint32_t val;
- bool ret;
+ intel_wakeref_t wakeref;
enum dpio_phy phy;
enum dpio_channel ch;
+ u32 val;
+ bool ret;
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -1643,7 +1662,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -1651,12 +1670,12 @@ out:
/* bxt clock parameters */
struct bxt_clk_div {
int clock;
- uint32_t p1;
- uint32_t p2;
- uint32_t m2_int;
- uint32_t m2_frac;
+ u32 p1;
+ u32 p2;
+ u32 m2_int;
+ u32 m2_frac;
bool m2_frac_en;
- uint32_t n;
+ u32 n;
int vco;
};
@@ -1723,8 +1742,8 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
int vco = clk_div->vco;
- uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
- uint32_t lanestagger;
+ u32 prop_coef, int_coef, gain_ctl, targ_cnt;
+ u32 lanestagger;
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
@@ -1873,7 +1892,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
if (INTEL_GEN(dev_priv) < 9) {
- uint32_t val = I915_READ(LCPLL_CTL);
+ u32 val = I915_READ(LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -1959,7 +1978,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
val = I915_READ(CNL_DPLL_ENABLE(id));
@@ -2034,7 +2053,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ u32 val;
/*
* 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
@@ -2091,10 +2110,13 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
+ intel_wakeref_t wakeref;
+ u32 val;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
ret = false;
@@ -2113,7 +2135,7 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -2225,7 +2247,7 @@ cnl_ddi_calculate_wrpll(int clock,
struct skl_wrpll_params *wrpll_params)
{
u32 afe_clock = clock * 5;
- uint32_t ref_clock;
+ u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2271,7 +2293,7 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
int clock)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- uint32_t cfgcr0, cfgcr1;
+ u32 cfgcr0, cfgcr1;
struct skl_wrpll_params wrpll_params = { 0, };
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
@@ -2300,7 +2322,7 @@ static bool
cnl_ddi_dp_set_dpll_hw_state(int clock,
struct intel_dpll_hw_state *dpll_hw_state)
{
- uint32_t cfgcr0;
+ u32 cfgcr0;
cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
@@ -2517,7 +2539,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- uint32_t cfgcr0, cfgcr1;
+ u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
@@ -2547,10 +2569,10 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
}
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- uint32_t pll_id)
+ u32 pll_id)
{
- uint32_t cfgcr0, cfgcr1;
- uint32_t pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
+ u32 cfgcr0, cfgcr1;
+ u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
const struct skl_wrpll_params *params;
int index, n_entries, link_clock;
@@ -2633,10 +2655,10 @@ bool intel_dpll_is_combophy(enum intel_dpll_id id)
}
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
- uint32_t *target_dco_khz,
+ u32 *target_dco_khz,
struct intel_dpll_hw_state *state)
{
- uint32_t dco_min_freq, dco_max_freq;
+ u32 dco_min_freq, dco_max_freq;
int div1_vals[] = {7, 5, 3, 2};
unsigned int i;
int div2;
@@ -2712,12 +2734,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int refclk_khz = dev_priv->cdclk.hw.ref;
- uint32_t dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
- uint32_t iref_ndiv, iref_trim, iref_pulse_w;
- uint32_t prop_coeff, int_coeff;
- uint32_t tdc_targetcnt, feedfwgain;
- uint64_t ssc_stepsize, ssc_steplen, ssc_steplog;
- uint64_t tmp;
+ u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
+ u32 iref_ndiv, iref_trim, iref_pulse_w;
+ u32 prop_coeff, int_coeff;
+ u32 tdc_targetcnt, feedfwgain;
+ u64 ssc_stepsize, ssc_steplen, ssc_steplog;
+ u64 tmp;
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
@@ -2740,7 +2762,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
m2div_rem = dco_khz % (refclk_khz * m1div);
- tmp = (uint64_t)m2div_rem * (1 << 22);
+ tmp = (u64)m2div_rem * (1 << 22);
do_div(tmp, refclk_khz * m1div);
m2div_frac = tmp;
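
m2div_frac stores the fractional part of the feedback divider in 22-bit fixed point: frac = (dco_khz mod (refclk_khz * m1div)) * 2^22 / (refclk_khz * m1div). Worked example with illustrative numbers, refclk_khz = 38400, m1div = 2, dco_khz = 8,100,000:

	m2div_int  = 8,100,000 / 76,800 = 105
	m2div_rem  = 8,100,000 - 105 * 76,800 = 36,000
	m2div_frac = 36,000 * 4,194,304 / 76,800 = 1,966,080	/* 0.46875 in Q22 */
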
@@ -2799,11 +2821,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
if (use_ssc) {
- tmp = (uint64_t)dco_khz * 47 * 32;
+ tmp = (u64)dco_khz * 47 * 32;
do_div(tmp, refclk_khz * m1div * 10000);
ssc_stepsize = tmp;
- tmp = (uint64_t)dco_khz * 1000;
+ tmp = (u64)dco_khz * 1000;
ssc_steplen = DIV_ROUND_UP_ULL(tmp, 32 * 2 * 32);
} else {
ssc_stepsize = 0;
@@ -2950,11 +2972,14 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state)
{
const enum intel_dpll_id id = pll->info->id;
- uint32_t val;
- enum port port;
+ intel_wakeref_t wakeref;
bool ret = false;
+ enum port port;
+ u32 val;
- if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
return false;
val = I915_READ(icl_pll_id_to_enable_reg(id));
@@ -3007,7 +3032,7 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = true;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
return ret;
}
@@ -3077,7 +3102,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
- uint32_t val;
+ u32 val;
val = I915_READ(enable_reg);
val |= PLL_POWER_ENABLE;
@@ -3118,7 +3143,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
- uint32_t val;
+ u32 val;
/* The first steps are done by intel_ddi_post_disable(). */
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index a033d8f06d4a..e96e79413b54 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -138,14 +138,14 @@ enum intel_dpll_id {
struct intel_dpll_hw_state {
/* i9xx, pch plls */
- uint32_t dpll;
- uint32_t dpll_md;
- uint32_t fp0;
- uint32_t fp1;
+ u32 dpll;
+ u32 dpll_md;
+ u32 fp0;
+ u32 fp1;
/* hsw, bdw */
- uint32_t wrpll;
- uint32_t spll;
+ u32 wrpll;
+ u32 spll;
/* skl */
/*
@@ -154,34 +154,33 @@ struct intel_dpll_hw_state {
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
- uint32_t ctrl1;
+ u32 ctrl1;
/* HDMI only, 0 when used for DP */
- uint32_t cfgcr1, cfgcr2;
+ u32 cfgcr1, cfgcr2;
/* cnl */
- uint32_t cfgcr0;
+ u32 cfgcr0;
/* CNL also uses cfgcr1 */
/* bxt */
- uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
- pcsdw12;
+ u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
/*
* ICL uses the following, already defined:
- * uint32_t cfgcr0, cfgcr1;
- */
- uint32_t mg_refclkin_ctl;
- uint32_t mg_clktop2_coreclkctl1;
- uint32_t mg_clktop2_hsclkctl;
- uint32_t mg_pll_div0;
- uint32_t mg_pll_div1;
- uint32_t mg_pll_lf;
- uint32_t mg_pll_frac_lock;
- uint32_t mg_pll_ssc;
- uint32_t mg_pll_bias;
- uint32_t mg_pll_tdc_coldst_bias;
- uint32_t mg_pll_bias_mask;
- uint32_t mg_pll_tdc_coldst_bias_mask;
+ * u32 cfgcr0, cfgcr1;
+ */
+ u32 mg_refclkin_ctl;
+ u32 mg_clktop2_coreclkctl1;
+ u32 mg_clktop2_hsclkctl;
+ u32 mg_pll_div0;
+ u32 mg_pll_div1;
+ u32 mg_pll_lf;
+ u32 mg_pll_frac_lock;
+ u32 mg_pll_ssc;
+ u32 mg_pll_bias;
+ u32 mg_pll_tdc_coldst_bias;
+ u32 mg_pll_bias_mask;
+ u32 mg_pll_tdc_coldst_bias_mask;
};
/**
@@ -280,7 +279,7 @@ struct dpll_info {
* Inform the state checker that the DPLL is kept enabled even if
* not in use by any CRTC.
*/
- uint32_t flags;
+ u32 flags;
};
/**
@@ -343,7 +342,7 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- uint32_t pll_id);
+ u32 pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_port_to_mg_pll_id(enum port port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 19d9abd2666e..33b733d37706 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/hdmi.h>
#include <linux/sched/clock.h>
+#include <linux/stackdepot.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
@@ -41,6 +42,8 @@
#include <drm/drm_atomic.h>
#include <media/cec-notifier.h>
+struct drm_printer;
+
/**
* __wait_for - magic wait macro
*
@@ -243,6 +246,9 @@ struct intel_encoder {
void (*post_pll_disable)(struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*update_pipe)(struct intel_encoder *,
+ const struct intel_crtc_state *,
+ const struct drm_connector_state *);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also sets the pipe
* it is connected to in the pipe parameter. */
@@ -294,13 +300,12 @@ struct intel_panel {
/* Connector and platform specific backlight functions */
int (*setup)(struct intel_connector *connector, enum pipe pipe);
- uint32_t (*get)(struct intel_connector *connector);
- void (*set)(const struct drm_connector_state *conn_state, uint32_t level);
+ u32 (*get)(struct intel_connector *connector);
+ void (*set)(const struct drm_connector_state *conn_state, u32 level);
void (*disable)(const struct drm_connector_state *conn_state);
void (*enable)(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- uint32_t (*hz_to_pwm)(struct intel_connector *connector,
- uint32_t hz);
+ u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz);
void (*power)(struct intel_connector *, bool enable);
} backlight;
};
@@ -592,7 +597,7 @@ struct intel_initial_plane_config {
struct intel_scaler {
int in_use;
- uint32_t mode;
+ u32 mode;
};
struct intel_crtc_scaler_state {
@@ -630,7 +635,7 @@ struct intel_crtc_scaler_state {
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- uint32_t linetime;
+ u32 linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
@@ -646,7 +651,7 @@ struct skl_plane_wm {
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
- uint32_t linetime;
+ u32 linetime;
};
enum vlv_wm_level {
@@ -659,7 +664,7 @@ enum vlv_wm_level {
struct vlv_wm_state {
struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS];
struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS];
- uint8_t num_levels;
+ u8 num_levels;
bool cxsr;
};
@@ -872,13 +877,13 @@ struct intel_crtc_state {
/* Used by SDVO (and if we ever fix it, HDMI). */
unsigned pixel_multiplier;
- uint8_t lane_count;
+ u8 lane_count;
/*
* Used by platforms having DP/HDMI PHY with programmable lane
* latency optimization.
*/
- uint8_t lane_lat_optim_mask;
+ u8 lane_lat_optim_mask;
/* minimum acceptable voltage level */
u8 min_voltage_level;
@@ -922,7 +927,7 @@ struct intel_crtc_state {
struct intel_crtc_wm_state wm;
/* Gamma mode programmed on the pipe */
- uint32_t gamma_mode;
+ u32 gamma_mode;
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
@@ -1008,7 +1013,7 @@ struct intel_plane {
enum pipe pipe;
bool has_fbc;
bool has_ccs;
- uint32_t frontbuffer_bit;
+ u32 frontbuffer_bit;
struct {
u32 base, cntl, size;
@@ -1103,9 +1108,9 @@ enum link_m_n_set {
struct intel_dp_compliance_data {
unsigned long edid;
- uint8_t video_pattern;
- uint16_t hdisplay, vdisplay;
- uint8_t bpc;
+ u8 video_pattern;
+ u16 hdisplay, vdisplay;
+ u8 bpc;
};
struct intel_dp_compliance {
@@ -1118,18 +1123,18 @@ struct intel_dp_compliance {
struct intel_dp {
i915_reg_t output_reg;
- uint32_t DP;
+ u32 DP;
int link_rate;
- uint8_t lane_count;
- uint8_t sink_count;
+ u8 lane_count;
+ u8 sink_count;
bool link_mst;
bool link_trained;
bool has_audio;
bool reset_link_params;
- uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
- uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
- uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
- uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
+ u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 fec_capable;
/* source rates */
@@ -1149,7 +1154,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
- uint8_t train_set[4];
+ u8 train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
int panel_power_cycle_delay;
@@ -1191,14 +1196,13 @@ struct intel_dp {
struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mst_mgr;
- uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
+ u32 (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
* This function returns the value we have to program the AUX_CTL
* register with to kick off an AUX transaction.
*/
- uint32_t (*get_aux_send_ctl)(struct intel_dp *dp,
- int send_bytes,
- uint32_t aux_clock_divider);
+ u32 (*get_aux_send_ctl)(struct intel_dp *dp, int send_bytes,
+ u32 aux_clock_divider);
i915_reg_t (*aux_ch_ctl_reg)(struct intel_dp *dp);
i915_reg_t (*aux_ch_data_reg)(struct intel_dp *dp, int index);
@@ -1208,6 +1212,9 @@ struct intel_dp {
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
+
+ /* Display stream compression testing */
+ bool force_dsc_en;
};
enum lspcon_vendor {
@@ -1229,10 +1236,11 @@ struct intel_digital_port {
struct intel_lspcon lspcon;
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
- uint8_t max_lanes;
+ u8 max_lanes;
/* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */
enum aux_ch aux_ch;
enum intel_display_power_domain ddi_io_power_domain;
+ bool tc_legacy_port:1;
enum tc_port_type tc_type;
void (*write_infoframe)(struct intel_encoder *encoder,
@@ -1463,8 +1471,8 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);
/* i915_irq.c */
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
@@ -1527,7 +1535,7 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
u32 bxt_signal_levels(struct intel_dp *intel_dp);
-uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
+u32 ddi_signal_levels(struct intel_dp *intel_dp);
u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
u8 voltage_swing);
@@ -1667,11 +1675,11 @@ void intel_cleanup_plane_fb(struct drm_plane *plane,
int intel_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
- uint64_t *val);
+ u64 *val);
int intel_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
- uint64_t val);
+ u64 val);
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct drm_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
@@ -1791,10 +1799,10 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count,
+ int link_rate, u8 lane_count,
bool link_mst);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
- int link_rate, uint8_t lane_count);
+ int link_rate, u8 lane_count);
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
@@ -1805,7 +1813,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
bool enable);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
-void intel_dp_encoder_destroy(struct drm_encoder *encoder);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
@@ -1826,7 +1834,7 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
void intel_plane_destroy(struct drm_plane *plane);
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
@@ -1839,24 +1847,24 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- uint8_t dp_train_pat);
+ u8 dp_train_pat);
void
intel_dp_set_signal_levels(struct intel_dp *intel_dp);
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-uint8_t
+u8
intel_dp_voltage_max(struct intel_dp *intel_dp);
-uint8_t
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing);
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
- uint8_t *link_bw, uint8_t *rate_select);
+ u8 *link_bw, u8 *rate_select);
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
-uint16_t intel_dp_dsc_get_output_bpp(int link_clock, uint8_t lane_count,
- int mode_clock, int mode_hdisplay);
-uint8_t intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+ int mode_clock, int mode_hdisplay);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
/* intel_vdsc.c */
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@@ -1873,6 +1881,8 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -2074,6 +2084,7 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
void intel_init_quirks(struct drm_i915_private *dev_priv);
/* intel_runtime_pm.c */
+void intel_runtime_pm_init_early(struct drm_i915_private *dev_priv);
int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
@@ -2096,6 +2107,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
void intel_runtime_pm_disable(struct drm_i915_private *dev_priv);
+void intel_runtime_pm_cleanup(struct drm_i915_private *dev_priv);
const char *
intel_display_power_domain_str(enum intel_display_power_domain domain);
@@ -2103,33 +2115,42 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
-bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain);
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref);
+#else
+#define intel_display_power_put(i915, domain, wakeref) \
+ intel_display_power_put_unchecked(i915, domain)
+#endif
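
The display power API now threads an intel_wakeref_t cookie from get to put so that CONFIG_DRM_I915_DEBUG_RUNTIME_PM builds can track each reference. A minimal caller sketch (example_read_pipe_reg() and its register argument are illustrative only, not part of this patch):

/*
 * Illustrative sketch only: a caller threads the wakeref cookie from
 * intel_display_power_get_if_enabled() through to intel_display_power_put().
 */
static u32 example_read_pipe_reg(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain,
				 i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u32 val;

	wakeref = intel_display_power_get_if_enabled(dev_priv, domain);
	if (!wakeref)
		return 0; /* power well is off, nothing to read */

	val = I915_READ(reg);

	/* Hand the same cookie back; debug builds verify it was tracked. */
	intel_display_power_put(dev_priv, domain, wakeref);

	return val;
}
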
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv)
+assert_rpm_device_not_suspended(struct drm_i915_private *i915)
{
- WARN_ONCE(dev_priv->runtime_pm.suspended,
+ WARN_ONCE(i915->runtime_pm.suspended,
"Device suspended during HW access\n");
}
static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
{
- assert_rpm_device_not_suspended(dev_priv);
- WARN_ONCE(!atomic_read(&dev_priv->runtime_pm.wakeref_count),
+ assert_rpm_device_not_suspended(i915);
+ WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
"RPM wakelock ref not held during HW access");
}
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function disables asserts that check if we hold an RPM wakelock
* reference, while keeping the device-not-suspended checks still enabled.
@@ -2146,14 +2167,14 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
* enable_rpm_wakeref_asserts().
*/
static inline void
-disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+disable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+ atomic_inc(&i915->runtime_pm.wakeref_count);
}
/**
* enable_rpm_wakeref_asserts - re-enable the RPM assert checks
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function re-enables the RPM assert checks after disabling them with
* disable_rpm_wakeref_asserts. It's meant to be used only in special
@@ -2163,15 +2184,39 @@ disable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
* disable_rpm_wakeref_asserts().
*/
static inline void
-enable_rpm_wakeref_asserts(struct drm_i915_private *dev_priv)
+enable_rpm_wakeref_asserts(struct drm_i915_private *i915)
{
- atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+ atomic_dec(&i915->runtime_pm.wakeref_count);
}
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915);
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915);
+
+#define with_intel_runtime_pm(i915, wf) \
+ for ((wf) = intel_runtime_pm_get(i915); (wf); \
+ intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+#define with_intel_runtime_pm_if_in_use(i915, wf) \
+ for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \
+ intel_runtime_pm_put((i915), (wf)), (wf) = 0)
+
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref);
+#else
+#define intel_runtime_pm_put(i915, wref) intel_runtime_pm_put_unchecked(i915)
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p);
+#else
+static inline void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p)
+{
+}
+#endif
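
with_intel_runtime_pm() wraps its body in a get/put pair: the for loop runs the body once while the wakeref is non-zero and releases it on exit, which is the pattern the later guc/huc hunks convert to. A minimal sketch (example_sample_register() is illustrative only):

/*
 * Illustrative sketch only: the body runs once with the device held
 * awake, and the wakeref is released when the block exits.
 */
static u32 example_sample_register(struct drm_i915_private *dev_priv,
				   i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u32 val = 0;

	with_intel_runtime_pm(dev_priv, wakeref)
		val = I915_READ(reg);

	return val;
}
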
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
@@ -2199,16 +2244,16 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv);
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
void gen6_rps_idle(struct drm_i915_private *dev_priv);
void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
-void g4x_wm_get_hw_state(struct drm_device *dev);
-void vlv_wm_get_hw_state(struct drm_device *dev);
-void ilk_wm_get_hw_state(struct drm_device *dev);
-void skl_wm_get_hw_state(struct drm_device *dev);
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
@@ -2277,11 +2322,11 @@ void intel_tv_init(struct drm_i915_private *dev_priv);
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
- uint64_t *val);
+ u64 *val);
int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
- uint64_t val);
+ u64 val);
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *new_state);
struct drm_connector_state *
@@ -2326,10 +2371,10 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *intel_state);
/* intel_color.c */
-void intel_color_init(struct drm_crtc *crtc);
-int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
-void intel_color_set_csc(struct drm_crtc_state *crtc_state);
-void intel_color_load_luts(struct drm_crtc_state *crtc_state);
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_set_csc(struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(struct intel_crtc_state *crtc_state);
/* intel_lspcon.c */
bool lspcon_init(struct intel_digital_port *intel_dig_port);
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index d968f1f13e09..a9a19778dc7f 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -24,7 +24,6 @@
#ifndef _INTEL_DSI_H
#define _INTEL_DSI_H
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include "intel_drv.h"
@@ -40,6 +39,7 @@ struct intel_dsi {
struct intel_encoder base;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
+ intel_wakeref_t io_wakeref[I915_MAX_PORTS];
/* GPIO Desc for CRC based Panel control */
struct gpio_desc *gpio_panel;
@@ -173,7 +173,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
void vlv_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void vlv_dsi_pll_disable(struct intel_encoder *encoder);
-u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
@@ -183,7 +183,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
void bxt_dsi_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *config);
void bxt_dsi_pll_disable(struct intel_encoder *encoder);
-u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index a1a8b3790e61..06a11c35a784 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -24,15 +24,15 @@
*
*/
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
#include <linux/gpio/consumer.h>
+#include <linux/mfd/intel_soc_pmic.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
#include <asm/intel-mid.h>
-#include <video/mipi_display.h>
+#include <asm/unaligned.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_dsi.h"
@@ -393,7 +393,25 @@ static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping PMIC element execution\n");
+#ifdef CONFIG_PMIC_OPREGION
+ u32 value, mask, reg_address;
+ u16 i2c_address;
+ int ret;
+
+ /* byte 0 aka PMIC Flag is reserved */
+ i2c_address = get_unaligned_le16(data + 1);
+ reg_address = get_unaligned_le32(data + 3);
+ value = get_unaligned_le32(data + 7);
+ mask = get_unaligned_le32(data + 11);
+
+ ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_address,
+ reg_address,
+ value, mask);
+ if (ret)
+ DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+#else
+ DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+#endif
return data + 15;
}
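
For reference, the 15-byte PMIC sequence element decoded above has the following layout; the struct is illustrative (assuming the little-endian, unaligned packing implied by the get_unaligned_le*() calls) and is not part of the patch:

/*
 * Illustrative layout of the VBT MIPI-sequence PMIC element parsed above;
 * all multi-byte fields are little-endian and unaligned.
 */
struct example_vbt_pmic_element {
	u8     flag;		/* byte 0: reserved PMIC flag */
	__le16 i2c_address;	/* bytes 1-2 */
	__le32 reg_address;	/* bytes 3-6 */
	__le32 value;		/* bytes 7-10 */
	__le32 mask;		/* bytes 11-14 */
} __packed;			/* 15 bytes total, hence "return data + 15" */
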
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 17a16917e134..a6c82482a841 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -26,7 +26,6 @@
*/
#include <linux/i2c.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include "intel_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ff5b7bc692ce..2f3c71f6d313 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -25,6 +25,7 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "i915_reset.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
@@ -261,6 +262,31 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ i915_reg_t hwstam;
+
+ /*
+ * Though they added more rings on g4x/ilk, they did not add
+ * per-engine HWSTAM until gen6.
+ */
+ if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+ return;
+
+ hwstam = RING_HWSTAM(engine->mmio_base);
+ if (INTEL_GEN(dev_priv) >= 3)
+ I915_WRITE(hwstam, mask);
+ else
+ I915_WRITE16(hwstam, mask);
+}
+
+static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
+{
+ /* Mask off all writes into the unknown HWSP */
+ intel_engine_set_hwsp_writemask(engine, ~0u);
+}
+
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
enum intel_engine_id id)
@@ -312,6 +338,9 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+ /* Scrub mmio state on takeover */
+ intel_engine_sanitize_mmio(engine);
+
dev_priv->engine_class[info->class][info->instance] = engine;
dev_priv->engine[id] = engine;
return 0;
@@ -365,7 +394,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
goto cleanup;
}
- device_info->num_rings = hweight32(mask);
+ RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
@@ -426,27 +455,9 @@ cleanup:
return err;
}
-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- /* Our semaphore implementation is strictly monotonic (i.e. we proceed
- * so long as the semaphore value in the register/page is greater
- * than the sync value), so whenever we reset the seqno,
- * so long as we reset the tracking semaphore value to 0, it will
- * always be before the next request's seqno. If we don't reset
- * the semaphore value, then when the seqno moves backwards all
- * future waits will complete instantly (causing rendering corruption).
- */
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
- I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
- I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
- if (HAS_VEBOX(dev_priv))
- I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
- }
-
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
/* After manually advancing the seqno, fake the interrupt in case
* there are any waiters for that seqno.
@@ -495,6 +506,9 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
static void cleanup_status_page(struct intel_engine_cs *engine)
{
+ /* Prevent writes into HWSP after returning the page to the system */
+ intel_engine_set_hwsp_writemask(engine, ~0u);
+
if (HWS_NEEDS_PHYSICAL(engine->i915)) {
void *addr = fetch_and_zero(&engine->status_page.page_addr);
@@ -769,12 +783,12 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
- const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 mcr_s_ss_select;
u32 slice = fls(sseu->slice_mask);
u32 subslice = fls(sseu->subslice_mask[slice]);
- if (IS_GEN10(dev_priv))
+ if (IS_GEN(dev_priv, 10))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
GEN8_MCR_SUBSLICE(subslice);
else if (INTEL_GEN(dev_priv) >= 11)
@@ -786,15 +800,15 @@ u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
return mcr_s_ss_select;
}
-static inline uint32_t
+static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
- uint32_t mcr_slice_subslice_mask;
- uint32_t mcr_slice_subslice_select;
- uint32_t default_mcr_s_ss_select;
- uint32_t mcr;
- uint32_t ret;
+ u32 mcr_slice_subslice_mask;
+ u32 mcr_slice_subslice_select;
+ u32 default_mcr_s_ss_select;
+ u32 mcr;
+ u32 ret;
enum forcewake_domains fw_domains;
if (INTEL_GEN(dev_priv) >= 11) {
@@ -900,10 +914,15 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
static bool ring_is_idle(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ intel_wakeref_t wakeref;
bool idle = true;
+ if (I915_SELFTEST_ONLY(!engine->mmio_base))
+ return true;
+
/* If the whole device is asleep, the engine must be idle */
- if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ if (!wakeref)
return true;
/* First check that no commands are left in the ring */
@@ -915,7 +934,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
idle = false;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
return idle;
}
@@ -939,9 +958,6 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
return false;
- if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
- return true;
-
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -967,10 +983,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return false;
/* Ring stopped? */
- if (!ring_is_idle(engine))
- return false;
-
- return true;
+ return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
@@ -1030,22 +1043,34 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
engine->set_default_submission(engine);
}
+static bool reset_engines(struct drm_i915_private *i915)
+{
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ return false;
+
+ return intel_gpu_reset(i915, ALL_ENGINES) == 0;
+}
+
/**
* intel_engines_sanitize: called after the GPU has lost power
* @i915: the i915 device
+ * @force: ignore a failed reset and sanitize engine state anyway
*
* Anytime we reset the GPU, either with an explicit GPU reset or through a
* PCI power cycle, the GPU loses state and we must reset our state tracking
* to match. Note that calling intel_engines_sanitize() if the GPU has not
* been reset results in much confusion!
*/
-void intel_engines_sanitize(struct drm_i915_private *i915)
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
GEM_TRACE("\n");
+ if (!reset_engines(i915) && !force)
+ return;
+
for_each_engine(engine, i915, id) {
if (engine->reset.reset)
engine->reset.reset(engine, NULL);
@@ -1248,7 +1273,7 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
- if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
+ if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
I915_READ(RING_START(engine->mmio_base)));
@@ -1269,16 +1294,6 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
}
- if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
- drm_printf(m, "\tSYNC_0: 0x%08x\n",
- I915_READ(RING_SYNC_0(engine->mmio_base)));
- drm_printf(m, "\tSYNC_1: 0x%08x\n",
- I915_READ(RING_SYNC_1(engine->mmio_base)));
- if (HAS_VEBOX(dev_priv))
- drm_printf(m, "\tSYNC_2: 0x%08x\n",
- I915_READ(RING_SYNC_2(engine->mmio_base)));
- }
-
addr = intel_engine_get_active_head(engine);
drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
@@ -1405,14 +1420,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
{
- const int MAX_REQUESTS_TO_SHOW = 8;
struct intel_breadcrumbs * const b = &engine->breadcrumbs;
- const struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_gpu_error * const error = &engine->i915->gpu_error;
- struct i915_request *rq, *last;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
unsigned long flags;
struct rb_node *rb;
- int count;
if (header) {
va_list ap;
@@ -1468,59 +1481,17 @@ void intel_engine_dump(struct intel_engine_cs *engine,
rcu_read_unlock();
- if (intel_runtime_pm_get_if_in_use(engine->i915)) {
+ wakeref = intel_runtime_pm_get_if_in_use(engine->i915);
+ if (wakeref) {
intel_engine_print_registers(engine, m);
- intel_runtime_pm_put(engine->i915);
+ intel_runtime_pm_put(engine->i915, wakeref);
} else {
drm_printf(m, "\tDevice is asleep; skipping register dump\n");
}
- local_irq_save(flags);
- spin_lock(&engine->timeline.lock);
+ intel_execlists_show_requests(engine, m, print_request, 8);
- last = NULL;
- count = 0;
- list_for_each_entry(rq, &engine->timeline.requests, link) {
- if (count++ < MAX_REQUESTS_TO_SHOW - 1)
- print_request(m, rq, "\t\tE ");
- else
- last = rq;
- }
- if (last) {
- if (count > MAX_REQUESTS_TO_SHOW) {
- drm_printf(m,
- "\t\t...skipping %d executing requests...\n",
- count - MAX_REQUESTS_TO_SHOW);
- }
- print_request(m, last, "\t\tE ");
- }
-
- last = NULL;
- count = 0;
- drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
- struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
- int i;
-
- priolist_for_each_request(rq, p, i) {
- if (count++ < MAX_REQUESTS_TO_SHOW - 1)
- print_request(m, rq, "\t\tQ ");
- else
- last = rq;
- }
- }
- if (last) {
- if (count > MAX_REQUESTS_TO_SHOW) {
- drm_printf(m,
- "\t\t...skipping %d queued requests...\n",
- count - MAX_REQUESTS_TO_SHOW);
- }
- print_request(m, last, "\t\tQ ");
- }
-
- spin_unlock(&engine->timeline.lock);
-
- spin_lock(&b->rb_lock);
+ spin_lock_irqsave(&b->rb_lock, flags);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
@@ -1529,13 +1500,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
task_state_to_char(w->tsk),
w->seqno);
}
- spin_unlock(&b->rb_lock);
- local_irq_restore(flags);
-
- drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
- engine->irq_posted,
- yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
- &engine->irq_posted)));
+ spin_unlock_irqrestore(&b->rb_lock, flags);
drm_printf(m, "HWSP:\n");
hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index f23570c44323..ec72be4b7a7b 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -84,7 +84,7 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
int lines;
intel_fbc_get_plane_source_size(cache, NULL, &lines);
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
lines = min(lines, 2048);
else if (INTEL_GEN(dev_priv) >= 8)
lines = min(lines, 2560);
@@ -127,7 +127,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
cfb_pitch = params->fb.stride;
/* FBC_CTL wants 32B or 64B units */
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cfb_pitch = (cfb_pitch / 32) - 1;
else
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -136,7 +136,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG(i), 0);
- if (IS_GEN4(dev_priv)) {
+ if (IS_GEN(dev_priv, 4)) {
u32 fbc_ctl2;
/* Set it up... */
@@ -233,9 +233,9 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
if (params->flags & PLANE_HAS_FENCE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
dpfc_ctl |= params->vma->fence->id;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE |
params->vma->fence->id);
@@ -243,7 +243,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
params->crtc.fence_y_offset);
}
} else {
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
I915_WRITE(SNB_DPFC_CTL_SA, 0);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
}
@@ -282,7 +282,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
int threshold = dev_priv->fbc.threshold;
/* Display WA #0529: skl, kbl, bxt. */
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv)) {
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv)) {
u32 val = I915_READ(CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
@@ -581,10 +581,10 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
if (stride < 512)
return false;
- if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
+ if (IS_GEN(dev_priv, 2) || IS_GEN(dev_priv, 3))
return stride == 4096 || stride == 8192;
- if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
+ if (IS_GEN(dev_priv, 4) && !IS_G4X(dev_priv) && stride < 2048)
return false;
if (stride > 16384)
@@ -594,7 +594,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
}
static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
- uint32_t pixel_format)
+ u32 pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_XRGB8888:
@@ -603,7 +603,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
@@ -626,7 +626,10 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
struct intel_fbc *fbc = &dev_priv->fbc;
unsigned int effective_w, effective_h, max_w, max_h;
- if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ max_w = 5120;
+ max_h = 4096;
+ } else if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
max_w = 4096;
max_h = 4096;
} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
@@ -784,7 +787,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* having a Y offset that isn't divisible by 4 causes FIFO underrun
* and screen flicker.
*/
- if (IS_GEN(dev_priv, 9, 10) &&
+ if (IS_GEN_RANGE(dev_priv, 9, 10) &&
(fbc->state_cache.plane.adjusted_y & 3)) {
fbc->no_fbc_reason = "plane Y offset is misaligned";
return false;
@@ -839,7 +842,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w,
32 * fbc->threshold) * 8;
}
@@ -1126,8 +1129,6 @@ void intel_fbc_disable(struct intel_crtc *crtc)
if (!fbc_supported(dev_priv))
return;
- WARN_ON(crtc->active);
-
mutex_lock(&fbc->lock);
if (fbc->crtc == crtc)
__intel_fbc_disable(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb5bb5b32a60..3036d835bc2b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -37,7 +37,6 @@
#include <linux/init.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
@@ -178,8 +177,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
const struct i915_ggtt_view view = {
.type = I915_GGTT_VIEW_NORMAL,
};
- struct fb_info *info;
struct drm_framebuffer *fb;
+ intel_wakeref_t wakeref;
+ struct fb_info *info;
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
@@ -210,7 +210,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
}
mutex_lock(&dev->struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
/* Pin the GGTT vma for our access via info->screen_base.
* This also validates that any existing fb inherited from the
@@ -277,7 +277,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev->struct_mutex);
vga_switcheroo_client_fb_set(pdev, info);
return 0;
@@ -285,7 +285,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_unpin:
intel_unpin_fb_vma(vma, flags);
out_unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 77c123cc8817..3b9285130ef5 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -127,8 +127,8 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
- DE_PIPEB_FIFO_UNDERRUN;
+ u32 bit = (pipe == PIPE_A) ?
+ DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN;
if (enable)
ilk_enable_display_irq(dev_priv, bit);
@@ -140,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- uint32_t err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = I915_READ(GEN7_ERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
@@ -193,8 +193,8 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t bit = (pch_transcoder == PIPE_A) ?
- SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
+ u32 bit = (pch_transcoder == PIPE_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
ibx_enable_display_interrupt(dev_priv, bit);
@@ -206,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
- uint32_t serr_int = I915_READ(SERR_INT);
+ u32 serr_int = I915_READ(SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
@@ -260,9 +260,9 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 5, 6))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
else if (INTEL_GEN(dev_priv) >= 8)
broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
@@ -423,7 +423,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
if (HAS_GMCH_DISPLAY(dev_priv))
i9xx_check_fifo_underruns(crtc);
- else if (IS_GEN7(dev_priv))
+ else if (IS_GEN(dev_priv, 7))
ivybridge_check_fifo_underruns(crtc);
}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index c3379bde266f..16f253deaf8d 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -60,7 +60,6 @@
* functions is deprecated and should be avoided.
*/
-#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 0f1c4f9ebfd8..744220296653 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -192,4 +192,7 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
spin_unlock_irq(&guc->irq_lock);
}
+int intel_guc_reset_engine(struct intel_guc *guc,
+ struct intel_engine_cs *engine);
+
#endif
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index a67144ee5ceb..13ff7003c6be 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -77,10 +77,6 @@ static void guc_fw_select(struct intel_uc_fw *guc_fw)
guc_fw->path = I915_KBL_GUC_UCODE;
guc_fw->major_ver_wanted = KBL_FW_MAJOR;
guc_fw->minor_ver_wanted = KBL_FW_MINOR;
- } else {
- dev_info(dev_priv->drm.dev,
- "%s: No firmware known for this platform!\n",
- intel_uc_fw_type_repr(guc_fw->type));
}
}
@@ -115,7 +111,7 @@ static void guc_prepare_xfer(struct intel_guc *guc)
else
I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
- if (IS_GEN9(dev_priv)) {
+ if (IS_GEN(dev_priv, 9)) {
/* DOP Clock Gating Enable for GuC clocks */
I915_WRITE(GEN7_MISCCPCTL, (GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
I915_READ(GEN7_MISCCPCTL)));
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index d3ebdbc0182e..b53582c0c6c1 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -436,6 +436,7 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ intel_wakeref_t wakeref;
guc_read_update_log_buffer(log);
@@ -443,9 +444,8 @@ static void guc_log_capture_logs(struct intel_guc_log *log)
* Generally device is expected to be active only at this
* time, so get/put should be really quick.
*/
- intel_runtime_pm_get(dev_priv);
- guc_action_flush_log_complete(guc);
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ guc_action_flush_log_complete(guc);
}
int intel_guc_log_create(struct intel_guc_log *log)
@@ -505,7 +505,8 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *dev_priv = guc_to_i915(guc);
- int ret;
+ intel_wakeref_t wakeref;
+ int ret = 0;
BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0);
GEM_BUG_ON(!log->vma);
@@ -519,16 +520,14 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
mutex_lock(&dev_priv->drm.struct_mutex);
- if (log->level == level) {
- ret = 0;
+ if (log->level == level)
goto out_unlock;
- }
- intel_runtime_pm_get(dev_priv);
- ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level),
- GUC_LOG_LEVEL_IS_ENABLED(level),
- GUC_LOG_LEVEL_TO_VERBOSITY(level));
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ ret = guc_action_control_log(guc,
+ GUC_LOG_LEVEL_IS_VERBOSE(level),
+ GUC_LOG_LEVEL_IS_ENABLED(level),
+ GUC_LOG_LEVEL_TO_VERBOSITY(level));
if (ret) {
DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
goto out_unlock;
@@ -601,6 +600,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
struct drm_i915_private *i915 = guc_to_i915(guc);
+ intel_wakeref_t wakeref;
/*
* Before initiating the forceful flush, wait for any pending/ongoing
@@ -608,9 +608,8 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
*/
flush_work(&log->relay.flush_work);
- intel_runtime_pm_get(i915);
- guc_action_flush_log(guc);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ guc_action_flush_log(guc);
/* GuC would have updated log buffer by now, so capture it */
guc_log_capture_logs(log);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 1570dcbe249c..ab1c49b106f2 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -572,7 +572,8 @@ static void inject_preempt_context(struct work_struct *work)
if (engine->id == RCS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
- addr);
+ addr,
+ PIPE_CONTROL_CS_STALL);
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index e26d05a46451..741441daae32 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -23,144 +23,7 @@
*/
#include "i915_drv.h"
-
-static bool
-ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
-{
- ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
- return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER);
-}
-
-static struct intel_engine_cs *
-semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
- u64 offset)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
- struct intel_engine_cs *signaller;
- enum intel_engine_id id;
-
- for_each_engine(signaller, dev_priv, id) {
- if (engine == signaller)
- continue;
-
- if (sync_bits == signaller->semaphore.mbox.wait[engine->hw_id])
- return signaller;
- }
-
- DRM_DEBUG_DRIVER("No signaller ring found for %s, ipehr 0x%08x\n",
- engine->name, ipehr);
-
- return ERR_PTR(-ENODEV);
-}
-
-static struct intel_engine_cs *
-semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- void __iomem *vaddr;
- u32 cmd, ipehr, head;
- u64 offset = 0;
- int i, backwards;
-
- /*
- * This function does not support execlist mode - any attempt to
- * proceed further into this function will result in a kernel panic
- * when dereferencing ring->buffer, which is not set up in execlist
- * mode.
- *
- * The correct way of doing it would be to derive the currently
- * executing ring buffer from the current context, which is derived
- * from the currently running request. Unfortunately, to get the
- * current request we would have to grab the struct_mutex before doing
- * anything else, which would be ill-advised since some other thread
- * might have grabbed it already and managed to hang itself, causing
- * the hang checker to deadlock.
- *
- * Therefore, this function does not support execlist mode in its
- * current form. Just return NULL and move on.
- */
- if (engine->buffer == NULL)
- return NULL;
-
- ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine, ipehr))
- return NULL;
-
- /*
- * HEAD is likely pointing to the dword after the actual command,
- * so scan backwards until we find the MBOX. But limit it to just 3
- * or 4 dwords depending on the semaphore wait command size.
- * Note that we don't care about ACTHD here since that might
- * point at at batch, and semaphores are always emitted into the
- * ringbuffer itself.
- */
- head = I915_READ_HEAD(engine) & HEAD_ADDR;
- backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
- vaddr = (void __iomem *)engine->buffer->vaddr;
-
- for (i = backwards; i; --i) {
- /*
- * Be paranoid and presume the hw has gone off into the wild -
- * our ring is smaller than what the hardware (and hence
- * HEAD_ADDR) allows. Also handles wrap-around.
- */
- head &= engine->buffer->size - 1;
-
- /* This here seems to blow up */
- cmd = ioread32(vaddr + head);
- if (cmd == ipehr)
- break;
-
- head -= 4;
- }
-
- if (!i)
- return NULL;
-
- *seqno = ioread32(vaddr + head + 4) + 1;
- return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
-}
-
-static int semaphore_passed(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct intel_engine_cs *signaller;
- u32 seqno;
-
- engine->hangcheck.deadlock++;
-
- signaller = semaphore_waits_for(engine, &seqno);
- if (signaller == NULL)
- return -1;
-
- if (IS_ERR(signaller))
- return 0;
-
- /* Prevent pathological recursion due to driver bugs */
- if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
- return -1;
-
- if (intel_engine_signaled(signaller, seqno))
- return 1;
-
- /* cursory check for an unkickable deadlock */
- if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
- semaphore_passed(signaller) < 0)
- return -1;
-
- return 0;
-}
-
-static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id)
- engine->hangcheck.deadlock = 0;
-}
+#include "i915_reset.h"
static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
{
@@ -236,7 +99,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
if (ha != ENGINE_DEAD)
return ha;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return ENGINE_DEAD;
/* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -252,37 +115,12 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
return ENGINE_WAIT_KICK;
}
- if (IS_GEN(dev_priv, 6, 7) && tmp & RING_WAIT_SEMAPHORE) {
- switch (semaphore_passed(engine)) {
- default:
- return ENGINE_DEAD;
- case 1:
- i915_handle_error(dev_priv, ALL_ENGINES, 0,
- "stuck semaphore on %s",
- engine->name);
- I915_WRITE_CTL(engine, tmp);
- return ENGINE_WAIT_KICK;
- case 0:
- return ENGINE_WAIT;
- }
- }
-
return ENGINE_DEAD;
}
static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct intel_engine_hangcheck *hc)
{
- /* We don't strictly need an irq-barrier here, as we are not
- * serving an interrupt request, be paranoid in case the
- * barrier has side-effects (such as preventing a broken
- * cacheline snoop) and so be sure that we can see the seqno
- * advance. If the seqno should stick, due to a stale
- * cacheline, we would erroneously declare the GPU hung.
- */
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
hc->acthd = intel_engine_get_active_head(engine);
hc->seqno = intel_engine_get_seqno(engine);
}
@@ -357,10 +195,6 @@ static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
break;
case ENGINE_DEAD:
- if (GEM_SHOW_DEBUG()) {
- struct drm_printer p = drm_debug_printer("hangcheck");
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
break;
default:
@@ -433,8 +267,6 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
for_each_engine(engine, dev_priv, id) {
struct intel_engine_hangcheck hc;
- semaphore_clear_deadlocks(dev_priv);
-
hangcheck_load_sample(engine, &hc);
hangcheck_accumulate_sample(engine, &hc);
hangcheck_store_sample(engine, &hc);
@@ -449,6 +281,17 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
wedged |= intel_engine_flag(engine);
}
+ if (GEM_SHOW_DEBUG() && (hung | stuck)) {
+ struct drm_printer p = drm_debug_printer("hangcheck");
+
+ for_each_engine(engine, dev_priv, id) {
+ if (intel_engine_is_idle(engine))
+ continue;
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+ }
+
if (wedged) {
dev_err(dev_priv->drm.dev,
"GPU recovery timed out,"
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index 1bf487f94254..ce7ba3a9c000 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -6,7 +6,6 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#include <drm/drmP.h>
#include <drm/drm_hdcp.h>
#include <linux/i2c.h>
#include <linux/random.h>
@@ -15,6 +14,7 @@
#include "i915_reg.h"
#define KEY_LOAD_TRIES 5
+#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -157,10 +157,11 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
/*
* Initiate loading the HDCP key from fuses.
*
- * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
- * differ in the key load trigger process from other platforms.
+ * On BXT+ platforms the HDCP key needs to be loaded by SW. Only Gen 9
+ * platforms other than BXT and GLK differ in the key load trigger
+ * process from other platforms, so GEN9_BC uses the GT Driver Mailbox i/f.
*/
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
@@ -636,7 +637,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
/* Wait for encryption confirmation */
if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
- HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
+ HDCP_STATUS_ENC, HDCP_STATUS_ENC,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
@@ -666,7 +668,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
I915_WRITE(PORT_HDCP_CONF(port), 0);
if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
- 20)) {
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
@@ -768,8 +770,7 @@ static void intel_hdcp_prop_work(struct work_struct *work)
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
/* PORT E doesn't have HDCP, and PORT F is disabled */
- return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
- !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
+ return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}
int intel_hdcp_init(struct intel_connector *connector,
@@ -837,8 +838,8 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state)
{
- uint64_t old_cp = old_state->content_protection;
- uint64_t new_cp = new_state->content_protection;
+ u64 old_cp = old_state->content_protection;
+ u64 new_cp = new_state->content_protection;
struct drm_crtc_state *crtc_state;
if (!new_state->crtc) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 1da7bb148fca..97a98e1bea56 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -1187,15 +1186,17 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_sdvo_port_enabled(dev_priv, intel_hdmi->hdmi_reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -1891,11 +1892,12 @@ intel_hdmi_set_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+ intel_wakeref_t wakeref;
struct edid *edid;
bool connected = false;
struct i2c_adapter *i2c;
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
@@ -1910,7 +1912,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
to_intel_connector(connector)->detect_edid = edid;
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
@@ -1932,11 +1934,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
+ intel_wakeref_t wakeref;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (IS_ICELAKE(dev_priv) &&
!intel_digital_port_connected(encoder))
@@ -1948,7 +1951,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
status = connector_status_connected;
out:
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
if (status != connector_status_connected)
cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index e24174d08fed..e027d2b4abe5 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -23,7 +23,6 @@
#include <linux/kernel.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -227,9 +226,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
+ intel_wakeref_t wakeref;
enum hpd_pin pin;
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
for_each_hpd_pin(pin) {
@@ -262,7 +262,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
}
bool intel_encoder_hotplug(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index bc27b691d824..9bd1c9002c2a 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -115,14 +115,14 @@ fail:
int intel_huc_check_status(struct intel_huc *huc)
{
struct drm_i915_private *dev_priv = huc_to_i915(huc);
- bool status;
+ intel_wakeref_t wakeref;
+ bool status = false;
if (!HAS_HUC(dev_priv))
return -ENODEV;
- intel_runtime_pm_get(dev_priv);
- status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
- intel_runtime_pm_put(dev_priv);
+ with_intel_runtime_pm(dev_priv, wakeref)
+ status = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
return status;
}
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index f93d2384d482..7d7bfc7f7ca7 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -23,8 +23,8 @@
*/
#define BXT_HUC_FW_MAJOR 01
-#define BXT_HUC_FW_MINOR 07
-#define BXT_BLD_NUM 1398
+#define BXT_HUC_FW_MINOR 8
+#define BXT_BLD_NUM 2893
#define SKL_HUC_FW_MAJOR 01
#define SKL_HUC_FW_MINOR 07
@@ -76,9 +76,6 @@ static void huc_fw_select(struct intel_uc_fw *huc_fw)
huc_fw->path = I915_KBL_HUC_UCODE;
huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
- } else {
- DRM_WARN("%s: No firmware known for this platform!\n",
- intel_uc_fw_type_repr(huc_fw->type));
}
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 802d0394ccc4..4f6dc8c94634 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -29,7 +29,6 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/export.h>
-#include <drm/drmP.h>
#include <drm/drm_hdcp.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -698,12 +697,13 @@ out:
static int
gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
- struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
- adapter);
+ struct intel_gmbus *bus =
+ container_of(adapter, struct intel_gmbus, adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
+ intel_wakeref_t wakeref;
int ret;
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
if (bus->force_bit) {
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
@@ -715,17 +715,16 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
bus->force_bit |= GMBUS_FORCE_BIT_RETRY;
}
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
{
- struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus,
- adapter);
+ struct intel_gmbus *bus =
+ container_of(adapter, struct intel_gmbus, adapter);
struct drm_i915_private *dev_priv = bus->dev_priv;
- int ret;
u8 cmd = DRM_HDCP_DDC_AKSV;
u8 buf[DRM_HDCP_KSV_LEN] = { 0 };
struct i2c_msg msgs[] = {
@@ -742,8 +741,10 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
.buf = buf,
}
};
+ intel_wakeref_t wakeref;
+ int ret;
- intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
mutex_lock(&dev_priv->gmbus_mutex);
/*
@@ -754,7 +755,7 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter)
ret = do_gmbus_xfer(adapter, msgs, ARRAY_SIZE(msgs), GMBUS_AKSV_SELECT);
mutex_unlock(&dev_priv->gmbus_mutex);
- intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS, wakeref);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d84c7815ee0c..436e59724900 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -133,7 +133,6 @@
*/
#include <linux/interrupt.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
@@ -363,31 +362,12 @@ execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
trace_i915_request_out(rq);
}
-static void
-execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
-{
- ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
- ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
-}
-
static u64 execlists_update_context(struct i915_request *rq)
{
- struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
struct intel_context *ce = rq->hw_context;
- u32 *reg_state = ce->lrc_reg_state;
- reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
-
- /*
- * True 32b PPGTT with dynamic page allocation: update PDP
- * registers and point the unallocated PDPs to scratch page.
- * PML4 is allocated during ppgtt init, so this is not needed
- * in 48-bit mode.
- */
- if (!i915_vm_is_48bit(&ppgtt->vm))
- execlists_update_context_pdps(ppgtt, reg_state);
+ ce->lrc_reg_state[CTX_RING_TAIL + 1] =
+ intel_ring_set_tail(rq->ring, rq->tail);
/*
* Make sure the context image is complete before we submit it to HW.
@@ -770,6 +750,13 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_clear_all_active(execlists);
}
+static inline void
+invalidate_csb_entries(const u32 *first, const u32 *last)
+{
+ clflush((void *)first);
+ clflush((void *)last);
+}
+
static void reset_csb_pointers(struct intel_engine_execlists *execlists)
{
const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
@@ -785,6 +772,9 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
*/
execlists->csb_head = reset_value;
WRITE_ONCE(*execlists->csb_write, reset_value);
+
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
}
static void nop_submission_tasklet(unsigned long data)
@@ -826,7 +816,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
list_for_each_entry(rq, &engine->timeline.requests, link) {
GEM_BUG_ON(!rq->global_seqno);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ if (i915_request_signaled(rq))
continue;
dma_fence_set_error(&rq->fence, -EIO);
@@ -1020,6 +1010,19 @@ static void process_csb(struct intel_engine_cs *engine)
} while (head != tail);
execlists->csb_head = head;
+
+ /*
+ * Gen11 has proven to fail wrt the global observation point between
+ * entry write and tail update, failing on the ordering and thus
+ * we see a stale entry in the context status buffer.
+ *
+ * Forcibly evict out entries for the next GPU CSB update,
+ * to increase the odds that we get fresh entries even with
+ * non-working hardware. The cost of doing so comes out mostly in
+ * the wash, as the hardware, working or not, will need to do the
+ * invalidation beforehand.
+ */
+ invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
}
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
@@ -1042,7 +1045,7 @@ static void execlists_submission_tasklet(unsigned long data)
GEM_TRACE("%s awake?=%d, active=%x\n",
engine->name,
- engine->i915->gt.awake,
+ !!engine->i915->gt.awake,
engine->execlists.active);
spin_lock_irqsave(&engine->timeline.lock, flags);
@@ -1247,29 +1250,88 @@ execlists_context_pin(struct intel_engine_cs *engine,
return __execlists_context_pin(engine, ctx, ce);
}
+static int emit_pdps(struct i915_request *rq)
+{
+ const struct intel_engine_cs * const engine = rq->engine;
+ struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
+ int err, i;
+ u32 *cs;
+
+ GEM_BUG_ON(intel_vgpu_active(rq->i915));
+
+ /*
+ * Beware ye of the dragons, this sequence is magic!
+ *
+ * Small changes to this sequence can cause anything from
+ * GPU hangs to forcewake errors and machine lockups!
+ */
+
+ /* Flush any residual operations from the context load */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Magic required to prevent forcewake errors! */
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Ensure the LRI have landed before we invalidate & continue */
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ /* Be doubly sure the LRI have landed before proceeding */
+ err = engine->emit_flush(rq, EMIT_FLUSH);
+ if (err)
+ return err;
+
+ /* Re-invalidate the TLB for luck */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int execlists_request_alloc(struct i915_request *request)
{
int ret;
GEM_BUG_ON(!request->hw_context->pin_count);
- /* Flush enough space to reduce the likelihood of waiting after
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
- if (ret)
- return ret;
-
- /* Note that after this point, we have committed to using
+ /*
+ * Note that after this point, we have committed to using
* this request as it is being used to both track the
* state of engine initialisation and liveness of the
* golden renderstate above. Think twice before you try
* to cancel/unwind this request now.
*/
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+ else
+ ret = emit_pdps(request);
+ if (ret)
+ return ret;
+
request->reserved_space -= EXECLISTS_REQUEST_SIZE;
return 0;
}
@@ -1592,7 +1654,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
+ intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
/*
* Make sure we're not enabling the new 12-deep CSB
@@ -1633,6 +1695,7 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
intel_engine_apply_workarounds(engine);
+ intel_engine_apply_whitelist(engine);
intel_mocs_init_engine(engine);
@@ -1649,43 +1712,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
return 0;
}
-static int gen8_init_render_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_common_ring(engine);
- if (ret)
- return ret;
-
- intel_engine_apply_whitelist(engine);
-
- /* We need to disable the AsyncFlip performance optimisations in order
- * to use MI_WAIT_FOR_EVENT within the CS. It should already be
- * programmed to '1' on all products.
- *
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
- */
- I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
- I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
-
- return 0;
-}
-
-static int gen9_init_render_ring(struct intel_engine_cs *engine)
-{
- int ret;
-
- ret = gen8_init_common_ring(engine);
- if (ret)
- return ret;
-
- intel_engine_apply_whitelist(engine);
-
- return 0;
-}
-
static struct i915_request *
execlists_reset_prepare(struct intel_engine_cs *engine)
{
@@ -1841,56 +1867,11 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
atomic_read(&execlists->tasklet.count));
}
-static int intel_logical_ring_emit_pdps(struct i915_request *rq)
-{
- struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
- struct intel_engine_cs *engine = rq->engine;
- const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
- u32 *cs;
- int i;
-
- cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
- for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
- const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
-
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
- *cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
- *cs++ = lower_32_bits(pd_daddr);
- }
-
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- return 0;
-}
-
static int gen8_emit_bb_start(struct i915_request *rq,
u64 offset, u32 len,
const unsigned int flags)
{
u32 *cs;
- int ret;
-
- /* Don't rely in hw updating PDPs, specially in lite-restore.
- * Ideally, we should set Force PD Restore in ctx descriptor,
- * but we can't. Force Restore would be a second option, but
- * it is unsafe in case of lite-restore (because the ctx is
- * not idle). PML4 is allocated during ppgtt init so this is
- * not needed in 48-bit.*/
- if ((intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
- !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
- !intel_vgpu_active(rq->i915)) {
- ret = intel_logical_ring_emit_pdps(rq);
- if (ret)
- return ret;
-
- rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
- }
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -1923,6 +1904,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_NOOP;
+
intel_ring_advance(rq, cs);
return 0;
@@ -2007,7 +1989,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
* On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
* pipe control.
*/
- if (IS_GEN9(request->i915))
+ if (IS_GEN(request->i915, 9))
vf_flush_wa = true;
/* WaForGAMHang:kbl */
@@ -2078,10 +2060,18 @@ static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
- cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
- intel_hws_seqno_address(request->engine));
+ cs = gen8_emit_ggtt_write_rcs(cs,
+ request->global_seqno,
+ intel_hws_seqno_address(request->engine),
+ PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_CS_STALL);
+
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+
request->tail = intel_ring_offset(request, cs);
assert_ring_tail_valid(request->ring, request->tail);
@@ -2244,6 +2234,8 @@ static int logical_ring_init(struct intel_engine_cs *engine)
if (ret)
return ret;
+ intel_engine_init_workarounds(engine);
+
if (HAS_LOGICAL_RING_ELSQ(i915)) {
execlists->submit_reg = i915->regs +
i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
@@ -2276,19 +2268,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
int logical_render_ring_init(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
int ret;
logical_ring_setup(engine);
- if (HAS_L3_DPF(dev_priv))
- engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
/* Override some for render ring. */
- if (INTEL_GEN(dev_priv) >= 9)
- engine->init_hw = gen9_init_render_ring;
- else
- engine->init_hw = gen8_init_render_ring;
engine->init_context = gen8_init_rcs_context;
engine->emit_flush = gen8_emit_flush_render;
engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
@@ -2310,7 +2294,6 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
}
intel_engine_init_whitelist(engine);
- intel_engine_init_workarounds(engine);
return 0;
}
@@ -2325,9 +2308,9 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine)
static u32
make_rpcs(struct drm_i915_private *dev_priv)
{
- bool subslice_pg = INTEL_INFO(dev_priv)->sseu.has_subslice_pg;
- u8 slices = hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask);
- u8 subslices = hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]);
+ bool subslice_pg = RUNTIME_INFO(dev_priv)->sseu.has_subslice_pg;
+ u8 slices = hweight8(RUNTIME_INFO(dev_priv)->sseu.slice_mask);
+ u8 subslices = hweight8(RUNTIME_INFO(dev_priv)->sseu.subslice_mask[0]);
u32 rpcs = 0;
/*
@@ -2362,7 +2345,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
* subslices are enabled, or a count between one and four on the first
* slice.
*/
- if (IS_GEN11(dev_priv) && slices == 1 && subslices >= 4) {
+ if (IS_GEN(dev_priv, 11) && slices == 1 && subslices >= 4) {
GEM_BUG_ON(subslices & 1);
subslice_pg = false;
@@ -2375,7 +2358,7 @@ make_rpcs(struct drm_i915_private *dev_priv)
* must make an explicit request through RPCS for full
* enablement.
*/
- if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
+ if (RUNTIME_INFO(dev_priv)->sseu.has_slice_pg) {
u32 mask, val = slices;
if (INTEL_GEN(dev_priv) >= 11) {
@@ -2403,17 +2386,17 @@ make_rpcs(struct drm_i915_private *dev_priv)
rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
}
- if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
+ if (RUNTIME_INFO(dev_priv)->sseu.has_eu_pg) {
u32 val;
- val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+ val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
GEN8_RPCS_EU_MIN_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
val &= GEN8_RPCS_EU_MIN_MASK;
rpcs |= val;
- val = INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
+ val = RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice <<
GEN8_RPCS_EU_MAX_SHIFT;
GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
val &= GEN8_RPCS_EU_MAX_MASK;
@@ -2538,6 +2521,11 @@ static void execlists_init_reg_state(u32 *regs,
* other PDP Descriptors are ignored.
*/
ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+ } else {
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
}
if (rcs) {
@@ -2620,7 +2608,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
- uint32_t context_size;
+ u32 context_size;
struct intel_ring *ring;
struct i915_timeline *timeline;
int ret;
@@ -2714,6 +2702,64 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
}
}
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ struct i915_request *rq,
+ const char *prefix),
+ unsigned int max)
+{
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_request *rq, *last;
+ unsigned long flags;
+ unsigned int count;
+ struct rb_node *rb;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ last = NULL;
+ count = 0;
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\tE ");
+ else
+ last = rq;
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d executing requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\tE ");
+ }
+
+ last = NULL;
+ count = 0;
+ drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
+ for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+ int i;
+
+ priolist_for_each_request(rq, p, i) {
+ if (count++ < max - 1)
+ show_request(m, rq, "\t\tQ ");
+ else
+ last = rq;
+ }
+ }
+ if (last) {
+ if (count > max) {
+ drm_printf(m,
+ "\t\t...skipping %d queued requests...\n",
+ count - max);
+ }
+ show_request(m, last, "\t\tQ ");
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f5a5502ecf70..3d86c27c6b32 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -97,11 +97,19 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
*/
#define LRC_HEADER_PAGES LRC_PPHWSP_PN
+struct drm_printer;
+
struct drm_i915_private;
struct i915_gem_context;
void intel_lr_context_resume(struct drm_i915_private *dev_priv);
-
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
+void intel_execlists_show_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ struct i915_request *rq,
+ const char *prefix),
+ unsigned int max);
+
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 7d15be5932e0..322bdddda164 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -288,12 +288,12 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
}
static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
- uint8_t *avi_buf)
+ u8 *avi_buf)
{
u8 avi_if_ctrl;
u8 block_count = 0;
u8 *data;
- uint16_t reg;
+ u16 reg;
ssize_t ret;
while (block_count < 4) {
@@ -335,10 +335,10 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux,
}
static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
- const uint8_t *frame,
+ const u8 *frame,
ssize_t len)
{
- uint8_t avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
+ u8 avi_if[LSPCON_PARADE_AVI_IF_DATA_SIZE] = {1, };
/*
* Parade's frames contains 32 bytes of data, divided
@@ -367,13 +367,13 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux,
}
static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux,
- const uint8_t *buffer, ssize_t len)
+ const u8 *buffer, ssize_t len)
{
int ret;
- uint32_t val = 0;
- uint32_t retry;
- uint16_t reg;
- const uint8_t *data = buffer;
+ u32 val = 0;
+ u32 retry;
+ u16 reg;
+ const u8 *data = buffer;
reg = LSPCON_MCA_AVI_IF_WRITE_OFFSET;
while (val < len) {
@@ -459,7 +459,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
{
ssize_t ret;
union hdmi_infoframe frame;
- uint8_t buf[VIDEO_DIP_DATA_SIZE];
+ u8 buf[VIDEO_DIP_DATA_SIZE];
struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
struct intel_lspcon *lspcon = &dig_port->lspcon;
const struct drm_display_mode *adjusted_mode =
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3377d813dbb3..46a5dfd5cdf7 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -95,15 +94,17 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
+ intel_wakeref_t wakeref;
bool ret;
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
ret = intel_lvds_port_enabled(dev_priv, lvds_encoder->reg, pipe);
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return ret;
}
@@ -279,7 +280,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg.
*/
- if (IS_GEN4(dev_priv)) {
+ if (IS_GEN(dev_priv, 4)) {
/*
* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels.
@@ -919,7 +920,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev_priv))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
- else if (IS_GEN4(dev_priv))
+ else if (IS_GEN(dev_priv, 4))
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
else
intel_encoder->crtc_mask = (1 << 1);
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 77e9871a8c9a..e976c5ce5479 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -193,7 +193,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
}
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
- if (IS_GEN9(dev_priv)) {
+ if (IS_GEN(dev_priv, 9)) {
int i;
for (i = 0; i < table->size; i++)
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index d89080d75b80..3d99d1271b2b 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -49,7 +49,6 @@
* context handling keep the MOCS in step.
*/
-#include <drm/drmP.h>
#include "i915_drv.h"
int intel_rcs_context_init_mocs(struct i915_request *rq);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index b8f106d9ecf8..30ae96c5c97c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -30,7 +30,6 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_opregion.h"
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 20ea7c99d13a..c81db81e4416 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -25,7 +25,6 @@
*
* Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -541,7 +540,7 @@ static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 widt
{
u32 sw;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
sw = ALIGN((offset & 31) + width, 32);
else
sw = ALIGN((offset & 63) + width, 64);
@@ -778,7 +777,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
u32 oconfig;
oconfig = OCONF_CC_OUT_8BIT;
- if (IS_GEN4(dev_priv))
+ if (IS_GEN(dev_priv, 4))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
@@ -1012,7 +1011,7 @@ static int check_overlay_src(struct drm_i915_private *dev_priv,
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
- if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
+ if (IS_GEN(dev_priv, 4) && rec->stride_Y < 512)
return -EINVAL;
tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1246,7 +1245,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->contrast = overlay->contrast;
attrs->saturation = overlay->saturation;
- if (!IS_GEN2(dev_priv)) {
+ if (!IS_GEN(dev_priv, 2)) {
attrs->gamma0 = I915_READ(OGAMC0);
attrs->gamma1 = I915_READ(OGAMC1);
attrs->gamma2 = I915_READ(OGAMC2);
@@ -1270,7 +1269,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
goto out_unlock;
if (overlay->active) {
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e6cd7b55c018..5a39a6347a7a 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -563,7 +563,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
}
- if (IS_GEN4(dev_priv)) {
+ if (IS_GEN(dev_priv, 4)) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
@@ -929,7 +929,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
@@ -1203,17 +1203,20 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
struct intel_connector *connector = bl_get_data(bd);
struct drm_device *dev = connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- u32 hw_level;
- int ret;
+ intel_wakeref_t wakeref;
+ int ret = 0;
- intel_runtime_pm_get(dev_priv);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ u32 hw_level;
- hw_level = intel_panel_get_backlight(connector);
- ret = scale_hw_to_user(connector, hw_level, bd->props.max_brightness);
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- intel_runtime_pm_put(dev_priv);
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector,
+ hw_level, bd->props.max_brightness);
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
return ret;
}
@@ -1557,7 +1560,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
ctl = I915_READ(BLC_PWM_CTL);
- if (IS_GEN2(dev_priv) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev_priv))
@@ -1886,7 +1889,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.get = vlv_get_backlight;
panel->backlight.hz_to_pwm = vlv_hz_to_pwm;
}
- } else if (IS_GEN4(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 4)) {
panel->backlight.setup = i965_setup_backlight;
panel->backlight.enable = i965_enable_backlight;
panel->backlight.disable = i965_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index f3c9010e332a..a8554dc4f196 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -44,7 +44,7 @@ static const char * const pipe_crc_sources[] = {
};
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -120,7 +120,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
bool need_stable_symbols = false;
@@ -165,7 +165,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -190,7 +190,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
bool need_stable_symbols = false;
@@ -244,7 +244,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
WARN_ON(!IS_G4X(dev_priv));
@@ -265,7 +265,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
switch (pipe) {
case PIPE_A:
@@ -289,7 +289,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- uint32_t tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = I915_READ(PORT_DFT2_G4X);
if (pipe == PIPE_A)
tmp &= ~PIPE_A_SCRAMBLE_RESET;
@@ -304,7 +304,7 @@ static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
- uint32_t *val)
+ u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -392,7 +392,7 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- uint32_t *val,
+ u32 *val,
bool set_wa)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
@@ -427,13 +427,13 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source, u32 *val,
bool set_wa)
{
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return i8xx_pipe_crc_ctl_reg(source, val);
else if (INTEL_GEN(dev_priv) < 5)
return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_pipe_crc_ctl_reg(source, val);
else
return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
@@ -544,13 +544,13 @@ static int
intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
const enum intel_pipe_crc_source source)
{
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
return i8xx_crc_source_valid(dev_priv, source);
else if (INTEL_GEN(dev_priv) < 5)
return i9xx_crc_source_valid(dev_priv, source);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_crc_source_valid(dev_priv, source);
- else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_crc_source_valid(dev_priv, source);
else
return ivb_crc_source_valid(dev_priv, source);
@@ -589,6 +589,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
+ intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
@@ -598,7 +599,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -624,7 +626,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
pipe_crc->skipped = 0;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a26b4eddda25..fdc28a3d2936 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -480,7 +480,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
int sprite0_start, sprite1_start;
switch (pipe) {
- uint32_t dsparb, dsparb2, dsparb3;
+ u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@@ -513,7 +513,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@@ -529,7 +529,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv,
static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x1ff;
@@ -546,7 +546,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv,
static int i845_get_fifo_size(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane)
{
- uint32_t dsparb = I915_READ(DSPARB);
+ u32 dsparb = I915_READ(DSPARB);
int size;
size = dsparb & 0x7f;
@@ -667,9 +667,9 @@ static unsigned int intel_wm_method1(unsigned int pixel_rate,
unsigned int cpp,
unsigned int latency)
{
- uint64_t ret;
+ u64 ret;
- ret = (uint64_t) pixel_rate * cpp * latency;
+ ret = (u64)pixel_rate * cpp * latency;
ret = DIV_ROUND_UP_ULL(ret, 10000);
return ret;
@@ -1089,9 +1089,9 @@ static int g4x_fbc_fifo_size(int level)
}
}
-static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
+static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1188,9 +1188,9 @@ static bool g4x_raw_fbc_wm_set(struct intel_crtc_state *crtc_state,
return dirty;
}
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t pri_val);
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 pri_val);
static bool g4x_raw_plane_wm_compute(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -1399,10 +1399,9 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
return 0;
}
-static int g4x_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
+static int g4x_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
struct intel_atomic_state *intel_state =
@@ -1599,9 +1598,9 @@ static void vlv_setup_wm_latency(struct drm_i915_private *dev_priv)
}
}
-static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int level)
+static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int level)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1969,7 +1968,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&dev_priv->uncore.lock);
switch (crtc->pipe) {
- uint32_t dsparb, dsparb2, dsparb3;
+ u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ_FW(DSPARB);
dsparb2 = I915_READ_FW(DSPARB2);
@@ -2032,10 +2031,9 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
#undef VLV_FIFO
-static int vlv_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *crtc,
- struct intel_crtc_state *new_crtc_state)
+static int vlv_compute_intermediate_wm(struct intel_crtc_state *new_crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
struct intel_atomic_state *intel_state =
@@ -2264,8 +2262,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
{
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
- uint32_t fwater_lo;
- uint32_t fwater_hi;
+ u32 fwater_lo;
+ u32 fwater_hi;
int cwm, srwm = 1;
int fifo_size;
int planea_wm, planeb_wm;
@@ -2273,7 +2271,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (IS_I945GM(dev_priv))
wm_info = &i945_wm_info;
- else if (!IS_GEN2(dev_priv))
+ else if (!IS_GEN(dev_priv, 2))
wm_info = &i915_wm_info;
else
wm_info = &i830_a_wm_info;
@@ -2287,7 +2285,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2302,7 +2300,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
planea_wm = wm_info->max_wm;
}
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
wm_info = &i830_bc_wm_info;
fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
@@ -2314,7 +2312,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
crtc->base.primary->state->fb;
int cpp;
- if (IS_GEN2(dev_priv))
+ if (IS_GEN(dev_priv, 2))
cpp = 4;
else
cpp = fb->format->cpp[0];
@@ -2408,7 +2406,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct drm_display_mode *adjusted_mode;
- uint32_t fwater_lo;
+ u32 fwater_lo;
int planea_wm;
crtc = single_enabled_crtc(dev_priv);
@@ -2457,8 +2455,7 @@ static unsigned int ilk_wm_method2(unsigned int pixel_rate,
return ret;
}
-static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
- uint8_t cpp)
+static u32 ilk_wm_fbc(u32 pri_val, u32 horiz_pixels, u8 cpp)
{
/*
* Neither of these should be possible since this function shouldn't be
@@ -2475,22 +2472,21 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
}
struct ilk_wm_maximums {
- uint16_t pri;
- uint16_t spr;
- uint16_t cur;
- uint16_t fbc;
+ u16 pri;
+ u16 spr;
+ u16 cur;
+ u16 fbc;
};
/*
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value,
- bool is_lp)
+static u32 ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value, bool is_lp)
{
- uint32_t method1, method2;
+ u32 method1, method2;
int cpp;
if (mem_value == 0)
@@ -2518,11 +2514,11 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value)
+static u32 ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value)
{
- uint32_t method1, method2;
+ u32 method1, method2;
int cpp;
if (mem_value == 0)
@@ -2545,9 +2541,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
* For both WM_PIPE and WM_LP.
* mem_value must be in 0.1us units.
*/
-static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t mem_value)
+static u32 ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 mem_value)
{
int cpp;
@@ -2565,9 +2561,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
}
/* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *pstate,
- uint32_t pri_val)
+static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate,
+ u32 pri_val)
{
int cpp;
@@ -2626,13 +2622,12 @@ static unsigned int ilk_fbc_wm_reg_max(const struct drm_i915_private *dev_priv)
}
/* Calculate the maximum primary/sprite plane watermark */
-static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
bool is_sprite)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
unsigned int fifo_size = ilk_display_fifo_size(dev_priv);
/* if sprites aren't enabled, sprites get nothing */
@@ -2668,7 +2663,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
}
/* Calculate the maximum cursor plane watermark */
-static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+static unsigned int ilk_cursor_wm_max(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config)
{
@@ -2677,19 +2672,19 @@ static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
return 64;
/* otherwise just report max that registers can hold */
- return ilk_cursor_wm_reg_max(to_i915(dev), level);
+ return ilk_cursor_wm_reg_max(dev_priv, level);
}
-static void ilk_compute_wm_maximums(const struct drm_device *dev,
+static void ilk_compute_wm_maximums(const struct drm_i915_private *dev_priv,
int level,
const struct intel_wm_config *config,
enum intel_ddb_partitioning ddb_partitioning,
struct ilk_wm_maximums *max)
{
- max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
- max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
- max->cur = ilk_cursor_wm_max(dev, level, config);
- max->fbc = ilk_fbc_wm_reg_max(to_i915(dev));
+ max->pri = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev_priv, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev_priv, level, config);
+ max->fbc = ilk_fbc_wm_reg_max(dev_priv);
}
static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
@@ -2734,9 +2729,9 @@ static bool ilk_validate_wm_level(int level,
DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
level, result->cur_val, max->cur);
- result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
- result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
- result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+ result->pri_val = min_t(u32, result->pri_val, max->pri);
+ result->spr_val = min_t(u32, result->spr_val, max->spr);
+ result->cur_val = min_t(u32, result->cur_val, max->cur);
result->enable = true;
}
@@ -2752,9 +2747,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
const struct intel_plane_state *curstate,
struct intel_wm_level *result)
{
- uint16_t pri_latency = dev_priv->wm.pri_latency[level];
- uint16_t spr_latency = dev_priv->wm.spr_latency[level];
- uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+ u16 pri_latency = dev_priv->wm.pri_latency[level];
+ u16 spr_latency = dev_priv->wm.spr_latency[level];
+ u16 cur_latency = dev_priv->wm.cur_latency[level];
/* WM1+ latency values stored in 0.5us units */
if (level > 0) {
@@ -2778,7 +2773,7 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static uint32_t
+static u32
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
const struct intel_atomic_state *intel_state =
@@ -2807,10 +2802,10 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
}
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[8])
+ u16 wm[8])
{
if (INTEL_GEN(dev_priv) >= 9) {
- uint32_t val;
+ u32 val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2894,7 +2889,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- uint64_t sskpd = I915_READ64(MCH_SSKPD);
+ u64 sskpd = I915_READ64(MCH_SSKPD);
wm[0] = (sskpd >> 56) & 0xFF;
if (wm[0] == 0)
@@ -2904,14 +2899,14 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
wm[3] = (sskpd >> 20) & 0x1FF;
wm[4] = (sskpd >> 32) & 0x1FF;
} else if (INTEL_GEN(dev_priv) >= 6) {
- uint32_t sskpd = I915_READ(MCH_SSKPD);
+ u32 sskpd = I915_READ(MCH_SSKPD);
wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
} else if (INTEL_GEN(dev_priv) >= 5) {
- uint32_t mltr = I915_READ(MLTR_ILK);
+ u32 mltr = I915_READ(MLTR_ILK);
/* ILK primary LP0 latency is 700 ns */
wm[0] = 7;
@@ -2923,18 +2918,18 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
static void intel_fixup_spr_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5])
+ u16 wm[5])
{
/* ILK sprite LP0 latency is 1300 ns */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
wm[0] = 13;
}
static void intel_fixup_cur_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5])
+ u16 wm[5])
{
/* ILK cursor LP0 latency is 1300 ns */
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
wm[0] = 13;
}
@@ -2953,7 +2948,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
const char *name,
- const uint16_t wm[8])
+ const u16 wm[8])
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2982,7 +2977,7 @@ static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
}
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
- uint16_t wm[5], uint16_t min)
+ u16 wm[5], u16 min)
{
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -2991,7 +2986,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
wm[0] = max(wm[0], min);
for (level = 1; level <= max_level; level++)
- wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
+ wm[level] = max_t(u16, wm[level], DIV_ROUND_UP(min, 5));
return true;
}
@@ -3061,7 +3056,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
snb_wm_latency_quirk(dev_priv);
snb_wm_lp3_irq_quirk(dev_priv);
}
@@ -3073,7 +3068,7 @@ static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
intel_print_wm_latency(dev_priv, "Gen9 Plane", dev_priv->wm.skl_latency);
}
-static bool ilk_validate_pipe_wm(struct drm_device *dev,
+static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
struct intel_pipe_wm *pipe_wm)
{
/* LP0 watermark maximums depend on this pipe alone */
@@ -3085,7 +3080,7 @@ static bool ilk_validate_pipe_wm(struct drm_device *dev,
struct ilk_wm_maximums max;
/* LP0 watermarks always use 1/2 DDB partitioning */
- ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
/* At least LP0 must be valid */
if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
@@ -3150,7 +3145,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
- if (!ilk_validate_pipe_wm(dev, pipe_wm))
+ if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
ilk_compute_wm_reg_maximums(dev_priv, 1, &max);
@@ -3180,17 +3175,17 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
* state and the new state. These can be programmed to the hardware
* immediately.
*/
-static int ilk_compute_intermediate_wm(struct drm_device *dev,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *newstate)
+static int ilk_compute_intermediate_wm(struct intel_crtc_state *newstate)
{
+ struct intel_crtc *intel_crtc = to_intel_crtc(newstate->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
struct intel_atomic_state *intel_state =
to_intel_atomic_state(newstate->base.state);
const struct intel_crtc_state *oldstate =
intel_atomic_get_old_crtc_state(intel_state, intel_crtc);
const struct intel_pipe_wm *b = &oldstate->wm.ilk.optimal;
- int level, max_level = ilk_wm_max_level(to_i915(dev));
+ int level, max_level = ilk_wm_max_level(dev_priv);
/*
* Start with the final, target watermarks, then combine with the
@@ -3223,7 +3218,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* there's no safe way to transition from the old state to
* the new state, so we need to fail the atomic transaction.
*/
- if (!ilk_validate_pipe_wm(dev, a))
+ if (!ilk_validate_pipe_wm(dev_priv, a))
return -EINVAL;
/*
@@ -3239,7 +3234,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
/*
* Merge the watermarks from all active pipes for a specific level.
*/
-static void ilk_merge_wm_level(struct drm_device *dev,
+static void ilk_merge_wm_level(struct drm_i915_private *dev_priv,
int level,
struct intel_wm_level *ret_wm)
{
@@ -3247,7 +3242,7 @@ static void ilk_merge_wm_level(struct drm_device *dev,
ret_wm->enable = true;
- for_each_intel_crtc(dev, intel_crtc) {
+ for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
const struct intel_wm_level *wm = &active->wm[level];
@@ -3272,12 +3267,11 @@ static void ilk_merge_wm_level(struct drm_device *dev,
/*
* Merge all low power watermarks for all active pipes.
*/
-static void ilk_wm_merge(struct drm_device *dev,
+static void ilk_wm_merge(struct drm_i915_private *dev_priv,
const struct intel_wm_config *config,
const struct ilk_wm_maximums *max,
struct intel_pipe_wm *merged)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
int level, max_level = ilk_wm_max_level(dev_priv);
int last_enabled_level = max_level;
@@ -3293,7 +3287,7 @@ static void ilk_wm_merge(struct drm_device *dev,
for (level = 1; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
- ilk_merge_wm_level(dev, level, wm);
+ ilk_merge_wm_level(dev_priv, level, wm);
if (level > last_enabled_level)
wm->enable = false;
@@ -3318,7 +3312,7 @@ static void ilk_wm_merge(struct drm_device *dev,
* What we should check here is whether FBC can be
* enabled sometime later.
*/
- if (IS_GEN5(dev_priv) && !merged->fbc_wm_enabled &&
+ if (IS_GEN(dev_priv, 5) && !merged->fbc_wm_enabled &&
intel_fbc_is_active(dev_priv)) {
for (level = 2; level <= max_level; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3335,22 +3329,20 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
}
/* The value we need to program into the WM_LPx latency field */
-static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
+static unsigned int ilk_wm_lp_latency(struct drm_i915_private *dev_priv,
+ int level)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
return 2 * level;
else
return dev_priv->wm.pri_latency[level];
}
-static void ilk_compute_wm_results(struct drm_device *dev,
+static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
const struct intel_pipe_wm *merged,
enum intel_ddb_partitioning partitioning,
struct ilk_wm_values *results)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc;
int level, wm_lp;
@@ -3370,7 +3362,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
* disabled. Doing otherwise could cause underruns.
*/
results->wm_lp[wm_lp - 1] =
- (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
+ (ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
(r->pri_val << WM1_LP_SR_SHIFT) |
r->cur_val;
@@ -3396,7 +3388,7 @@ static void ilk_compute_wm_results(struct drm_device *dev,
}
/* LP0 register values */
- for_each_intel_crtc(dev, intel_crtc) {
+ for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
const struct intel_wm_level *r =
&intel_crtc->wm.active.ilk.wm[0];
@@ -3415,11 +3407,12 @@ static void ilk_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
- struct intel_pipe_wm *r1,
- struct intel_pipe_wm *r2)
+static struct intel_pipe_wm *
+ilk_find_best_result(struct drm_i915_private *dev_priv,
+ struct intel_pipe_wm *r1,
+ struct intel_pipe_wm *r2)
{
- int level, max_level = ilk_wm_max_level(to_i915(dev));
+ int level, max_level = ilk_wm_max_level(dev_priv);
int level1 = 0, level2 = 0;
for (level = 1; level <= max_level; level++) {
@@ -3540,7 +3533,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
{
struct ilk_wm_values *previous = &dev_priv->wm.hw;
unsigned int dirty;
- uint32_t val;
+ u32 val;
dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
if (!dirty)
@@ -3756,9 +3749,9 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state)
if (!intel_has_sagv(dev_priv))
return false;
- if (IS_GEN9(dev_priv))
+ if (IS_GEN(dev_priv, 9))
sagv_block_time_us = 30;
- else if (IS_GEN10(dev_priv))
+ else if (IS_GEN(dev_priv, 10))
sagv_block_time_us = 20;
else
sagv_block_time_us = 10;
@@ -3994,10 +3987,12 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
enum plane_id plane_id;
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return;
for_each_plane_id_on_crtc(crtc, plane_id)
@@ -4006,7 +4001,7 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
&ddb_y[plane_id],
&ddb_uv[plane_id]);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@@ -4036,7 +4031,7 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
struct intel_plane *plane = to_intel_plane(pstate->base.plane);
- uint32_t src_w, src_h, dst_w, dst_h;
+ u32 src_w, src_h, dst_w, dst_h;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4082,8 +4077,8 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
return pipe_downscale;
if (crtc_state->pch_pfit.enabled) {
- uint32_t src_w, src_h, dst_w, dst_h;
- uint32_t pfit_size = crtc_state->pch_pfit.size;
+ u32 src_w, src_h, dst_w, dst_h;
+ u32 pfit_size = crtc_state->pch_pfit.size;
uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
uint_fixed_16_16_t downscale_h, downscale_w;
@@ -4116,7 +4111,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
const struct drm_plane_state *pstate;
struct intel_plane_state *intel_pstate;
int crtc_clock, dotclk;
- uint32_t pipe_max_pixel_rate;
+ u32 pipe_max_pixel_rate;
uint_fixed_16_16_t pipe_downscale;
uint_fixed_16_16_t max_downscale = u32_to_fixed16(1);
@@ -4172,8 +4167,8 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
{
struct intel_plane *intel_plane =
to_intel_plane(intel_pstate->base.plane);
- uint32_t data_rate;
- uint32_t width = 0, height = 0;
+ u32 data_rate;
+ u32 width = 0, height = 0;
struct drm_framebuffer *fb;
u32 format;
uint_fixed_16_16_t down_scale_amount;
@@ -4306,102 +4301,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate,
return total_data_rate;
}
-static uint16_t
-skl_ddb_min_alloc(const struct drm_plane_state *pstate, const int plane)
-{
- struct drm_framebuffer *fb = pstate->fb;
- struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
- uint32_t src_w, src_h;
- uint32_t min_scanlines = 8;
- uint8_t plane_bpp;
-
- if (WARN_ON(!fb))
- return 0;
-
- /* For packed formats, and uv-plane, return 0 */
- if (plane == 1 && fb->format->format != DRM_FORMAT_NV12)
- return 0;
-
- /* For Non Y-tile return 8-blocks */
- if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED &&
- fb->modifier != I915_FORMAT_MOD_Y_TILED_CCS &&
- fb->modifier != I915_FORMAT_MOD_Yf_TILED_CCS)
- return 8;
-
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
- src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
-
- /* Halve UV plane width and height for NV12 */
- if (plane == 1) {
- src_w /= 2;
- src_h /= 2;
- }
-
- plane_bpp = fb->format->cpp[plane];
-
- if (drm_rotation_90_or_270(pstate->rotation)) {
- switch (plane_bpp) {
- case 1:
- min_scanlines = 32;
- break;
- case 2:
- min_scanlines = 16;
- break;
- case 4:
- min_scanlines = 8;
- break;
- case 8:
- min_scanlines = 4;
- break;
- default:
- WARN(1, "Unsupported pixel depth %u for rotation",
- plane_bpp);
- min_scanlines = 32;
- }
- }
-
- return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
-}
-
-static void
-skl_ddb_calc_min(const struct intel_crtc_state *cstate, int num_active,
- uint16_t *minimum, uint16_t *uv_minimum)
-{
- const struct drm_plane_state *pstate;
- struct drm_plane *plane;
-
- drm_atomic_crtc_state_for_each_plane_state(plane, pstate, &cstate->base) {
- enum plane_id plane_id = to_intel_plane(plane)->id;
- struct intel_plane_state *plane_state = to_intel_plane_state(pstate);
-
- if (plane_id == PLANE_CURSOR)
- continue;
-
- /* slave plane must be invisible and calculated from master */
- if (!pstate->visible || WARN_ON(plane_state->slave))
- continue;
-
- if (!plane_state->linked_plane) {
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 0);
- uv_minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
- } else {
- enum plane_id y_plane_id =
- plane_state->linked_plane->id;
-
- minimum[y_plane_id] = skl_ddb_min_alloc(pstate, 0);
- minimum[plane_id] = skl_ddb_min_alloc(pstate, 1);
- }
- }
-
- minimum[PLANE_CURSOR] = skl_cursor_allocation(num_active);
-}
-
static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb /* out */)
@@ -4411,15 +4310,17 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
- uint16_t alloc_size, start;
- uint16_t minimum[I915_MAX_PLANES] = {};
- uint16_t uv_minimum[I915_MAX_PLANES] = {};
+ struct skl_plane_wm *wm;
+ u16 alloc_size, start = 0;
+ u16 total[I915_MAX_PLANES] = {};
+ u16 uv_total[I915_MAX_PLANES] = {};
u64 total_data_rate;
enum plane_id plane_id;
int num_active;
u64 plane_data_rate[I915_MAX_PLANES] = {};
u64 uv_plane_data_rate[I915_MAX_PLANES] = {};
- uint16_t total_min_blocks = 0;
+ u16 blocks = 0;
+ int level;
/* Clear the partitioning for disabled planes. */
memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
@@ -4449,81 +4350,135 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (alloc_size == 0)
return 0;
- skl_ddb_calc_min(cstate, num_active, minimum, uv_minimum);
+ /* Allocate fixed number of blocks for cursor. */
+ total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+ alloc_size -= total[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
+ alloc->end - total[PLANE_CURSOR];
+ cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+
+ if (total_data_rate == 0)
+ return 0;
/*
- * 1. Allocate the mininum required blocks for each active plane
- * and allocate the cursor, it doesn't require extra allocation
- * proportional to the data rate.
+ * Find the highest watermark level for which we can satisfy the block
+ * requirement of active planes.
*/
+ for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
+ blocks = 0;
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- total_min_blocks += minimum[plane_id];
- total_min_blocks += uv_minimum[plane_id];
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ blocks += wm->wm[level].plane_res_b;
+ blocks += wm->uv_wm[level].plane_res_b;
+ }
+
+ if (blocks < alloc_size) {
+ alloc_size -= blocks;
+ break;
+ }
}
- if (total_min_blocks > alloc_size) {
+ if (level < 0) {
DRM_DEBUG_KMS("Requested display configuration exceeds system DDB limitations");
- DRM_DEBUG_KMS("minimum required %d/%d\n", total_min_blocks,
- alloc_size);
+ DRM_DEBUG_KMS("minimum required %d/%d\n", blocks,
+ alloc_size);
return -EINVAL;
}
- alloc_size -= total_min_blocks;
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
- cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
-
/*
- * 2. Distribute the remaining space in proportion to the amount of
- * data each plane needs to fetch from memory.
- *
- * FIXME: we may not allocate every single block here.
+ * Grant each plane the blocks it requires at the highest achievable
+ * watermark level, plus an extra share of the leftover blocks
+ * proportional to its relative data rate.
*/
- if (total_data_rate == 0)
- return 0;
-
- start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- u64 data_rate, uv_data_rate;
- uint16_t plane_blocks, uv_plane_blocks;
+ u64 rate;
+ u16 extra;
if (plane_id == PLANE_CURSOR)
continue;
- data_rate = plane_data_rate[plane_id];
-
/*
- * allocation for (packed formats) or (uv-plane part of planar format):
- * promote the expression to 64 bits to avoid overflowing, the
- * result is < available as data_rate / total_data_rate < 1
+ * We've accounted for all active planes; remaining planes are
+ * all disabled.
*/
- plane_blocks = minimum[plane_id];
- plane_blocks += div64_u64(alloc_size * data_rate, total_data_rate);
+ if (total_data_rate == 0)
+ break;
- /* Leave disabled planes at (0,0) */
- if (data_rate) {
- cstate->wm.skl.plane_ddb_y[plane_id].start = start;
- cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
- }
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
- start += plane_blocks;
+ rate = plane_data_rate[plane_id];
+ extra = min_t(u16, alloc_size,
+ DIV64_U64_ROUND_UP(alloc_size * rate,
+ total_data_rate));
+ total[plane_id] = wm->wm[level].plane_res_b + extra;
+ alloc_size -= extra;
+ total_data_rate -= rate;
- /* Allocate DDB for UV plane for planar format/NV12 */
- uv_data_rate = uv_plane_data_rate[plane_id];
+ if (total_data_rate == 0)
+ break;
+
+ rate = uv_plane_data_rate[plane_id];
+ extra = min_t(u16, alloc_size,
+ DIV64_U64_ROUND_UP(alloc_size * rate,
+ total_data_rate));
+ uv_total[plane_id] = wm->uv_wm[level].plane_res_b + extra;
+ alloc_size -= extra;
+ total_data_rate -= rate;
+ }
+ WARN_ON(alloc_size != 0 || total_data_rate != 0);
+
+ /* Set the actual DDB start/end points for each plane */
+ start = alloc->start;
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
+
+ if (plane_id == PLANE_CURSOR)
+ continue;
- uv_plane_blocks = uv_minimum[plane_id];
- uv_plane_blocks += div64_u64(alloc_size * uv_data_rate, total_data_rate);
+ plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
+ uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
/* Gen11+ uses a separate plane for UV watermarks */
- WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
+ WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
- if (uv_data_rate) {
- cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
- cstate->wm.skl.plane_ddb_uv[plane_id].end =
- start + uv_plane_blocks;
+ /* Leave disabled planes at (0,0) */
+ if (total[plane_id]) {
+ plane_alloc->start = start;
+ start += total[plane_id];
+ plane_alloc->end = start;
+ }
+
+ if (uv_total[plane_id]) {
+ uv_plane_alloc->start = start;
+ start += uv_total[plane_id];
+ uv_plane_alloc->end = start;
+ }
+ }
+
+ /*
+ * When we calculated watermark values we didn't know how high
+ * of a level we'd actually be able to hit, so we just marked
+ * all levels as "enabled." Go back now and disable the ones
+ * that aren't actually possible.
+ */
+ for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
}
+ }
- start += uv_plane_blocks;
+ /*
+ * Go back and disable the transition watermark if it turns out we
+ * don't have enough DDB blocks for it.
+ */
+ for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ wm = &cstate->wm.skl.optimal.planes[plane_id];
+ if (wm->trans_wm.plane_res_b > total[plane_id])
+ memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
}
return 0;
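To see why the WARN_ON above holds, the proportional top-up can be traced with made-up numbers. The following is an illustrative, standalone sketch only, not code from the patch; the two plane data rates are hypothetical.

/*
 * Sketch of the leftover-block share: 100 spare blocks, two planes with
 * hypothetical data rates 300 and 100. Mirrors the min_t()/
 * DIV64_U64_ROUND_UP() arithmetic used above, in plain C.
 */
#include <stdio.h>

int main(void)
{
	unsigned int alloc_size = 100;           /* leftover blocks */
	unsigned long long total_rate = 400;     /* 300 + 100 */
	unsigned long long rate[2] = { 300, 100 };

	for (int i = 0; i < 2; i++) {
		/* round-up division, as DIV64_U64_ROUND_UP() does */
		unsigned int extra =
			(alloc_size * rate[i] + total_rate - 1) / total_rate;
		if (extra > alloc_size)
			extra = alloc_size;      /* the min_t() clamp */
		printf("plane %d gets %u extra blocks\n", i, extra);
		alloc_size -= extra;             /* 100 -> 25 -> 0 */
		total_rate -= rate[i];           /* 400 -> 100 -> 0 */
	}
	printf("leftover %u, rate %llu\n", alloc_size, total_rate);
	return 0;
}

Both counters end at zero, which is exactly what the WARN_ON(alloc_size != 0 || total_data_rate != 0) asserts after the distribution loop.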
@@ -4536,10 +4491,10 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
*/
static uint_fixed_16_16_t
-skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
- uint8_t cpp, uint32_t latency, uint32_t dbuf_block_size)
+skl_wm_method1(const struct drm_i915_private *dev_priv, u32 pixel_rate,
+ u8 cpp, u32 latency, u32 dbuf_block_size)
{
- uint32_t wm_intermediate_val;
+ u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@@ -4554,12 +4509,11 @@ skl_wm_method1(const struct drm_i915_private *dev_priv, uint32_t pixel_rate,
return ret;
}
-static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
- uint32_t pipe_htotal,
- uint32_t latency,
- uint_fixed_16_16_t plane_blocks_per_line)
+static uint_fixed_16_16_t
+skl_wm_method2(u32 pixel_rate, u32 pipe_htotal, u32 latency,
+ uint_fixed_16_16_t plane_blocks_per_line)
{
- uint32_t wm_intermediate_val;
+ u32 wm_intermediate_val;
uint_fixed_16_16_t ret;
if (latency == 0)
@@ -4575,8 +4529,8 @@ static uint_fixed_16_16_t skl_wm_method2(uint32_t pixel_rate,
static uint_fixed_16_16_t
intel_get_linetime_us(const struct intel_crtc_state *cstate)
{
- uint32_t pixel_rate;
- uint32_t crtc_htotal;
+ u32 pixel_rate;
+ u32 crtc_htotal;
uint_fixed_16_16_t linetime_us;
if (!cstate->base.active)
@@ -4593,11 +4547,11 @@ intel_get_linetime_us(const struct intel_crtc_state *cstate)
return linetime_us;
}
-static uint32_t
+static u32
skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate)
{
- uint64_t adjusted_pixel_rate;
+ u64 adjusted_pixel_rate;
uint_fixed_16_16_t downscale_amount;
/* Shouldn't reach here on disabled planes... */
@@ -4624,7 +4578,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_plane_state *pstate = &intel_pstate->base;
const struct drm_framebuffer *fb = pstate->fb;
- uint32_t interm_pbpl;
+ u32 interm_pbpl;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
@@ -4702,7 +4656,7 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->plane_blocks_per_line = div_fixed16(interm_pbpl,
wp->y_min_scanlines);
- } else if (wp->x_tiled && IS_GEN9(dev_priv)) {
+ } else if (wp->x_tiled && IS_GEN(dev_priv, 9)) {
interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line,
wp->dbuf_block_size);
wp->plane_blocks_per_line = u32_to_fixed16(interm_pbpl);
@@ -4720,28 +4674,22 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
return 0;
}
-static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- uint16_t ddb_allocation,
- int level,
- const struct skl_wm_params *wp,
- const struct skl_wm_level *result_prev,
- struct skl_wm_level *result /* out */)
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *intel_pstate,
+ int level,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */)
{
struct drm_i915_private *dev_priv =
to_i915(intel_pstate->base.plane->dev);
- const struct drm_plane_state *pstate = &intel_pstate->base;
- uint32_t latency = dev_priv->wm.skl_latency[level];
+ u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
- uint32_t res_blocks, res_lines;
+ u32 res_blocks, res_lines;
struct intel_atomic_state *state =
to_intel_atomic_state(cstate->base.state);
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
- uint32_t min_disp_buf_needed;
-
- if (latency == 0)
- return level == 0 ? -EINVAL : 0;
/* Display WA #1141: kbl,cfl */
if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv) ||
@@ -4766,15 +4714,8 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
wp->dbuf_block_size < 1) &&
(wp->plane_bytes_per_line / wp->dbuf_block_size < 1)) {
selected_result = method2;
- } else if (ddb_allocation >=
- fixed16_to_u32_round_up(wp->plane_blocks_per_line)) {
- if (IS_GEN9(dev_priv) &&
- !IS_GEMINILAKE(dev_priv))
- selected_result = min_fixed16(method1, method2);
- else
- selected_result = method2;
} else if (latency >= wp->linetime_us) {
- if (IS_GEN9(dev_priv) &&
+ if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))
selected_result = min_fixed16(method1, method2);
else
@@ -4788,85 +4729,51 @@ static int skl_compute_plane_wm(const struct intel_crtc_state *cstate,
res_lines = div_round_up_fixed16(selected_result,
wp->plane_blocks_per_line);
- /* Display WA #1125: skl,bxt,kbl,glk */
- if (level == 0 && wp->rc_surface)
- res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum);
-
- /* Display WA #1126: skl,bxt,kbl,glk */
- if (level >= 1 && level <= 7) {
- if (wp->y_tiled) {
- res_blocks += fixed16_to_u32_round_up(
- wp->y_tile_minimum);
- res_lines += wp->y_min_scanlines;
- } else {
- res_blocks++;
- }
-
- /*
- * Make sure result blocks for higher latency levels are atleast
- * as high as level below the current level.
- * Assumption in DDB algorithm optimization for special cases.
- * Also covers Display WA #1125 for RC.
- */
- if (result_prev->plane_res_b > res_blocks)
- res_blocks = result_prev->plane_res_b;
- }
-
- if (INTEL_GEN(dev_priv) >= 11) {
- if (wp->y_tiled) {
- uint32_t extra_lines;
- uint_fixed_16_16_t fp_min_disp_buf_needed;
-
- if (res_lines % wp->y_min_scanlines == 0)
- extra_lines = wp->y_min_scanlines;
- else
- extra_lines = wp->y_min_scanlines * 2 -
- res_lines % wp->y_min_scanlines;
-
- fp_min_disp_buf_needed = mul_u32_fixed16(res_lines +
- extra_lines,
- wp->plane_blocks_per_line);
- min_disp_buf_needed = fixed16_to_u32_round_up(
- fp_min_disp_buf_needed);
- } else {
- min_disp_buf_needed = DIV_ROUND_UP(res_blocks * 11, 10);
- }
- } else {
- min_disp_buf_needed = res_blocks;
- }
-
- if ((level > 0 && res_lines > 31) ||
- res_blocks >= ddb_allocation ||
- min_disp_buf_needed >= ddb_allocation) {
- /*
- * If there are no valid level 0 watermarks, then we can't
- * support this display configuration.
- */
- if (level) {
- return 0;
- } else {
- struct drm_plane *plane = pstate->plane;
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
+ /* Display WA #1125: skl,bxt,kbl */
+ if (level == 0 && wp->rc_surface)
+ res_blocks +=
+ fixed16_to_u32_round_up(wp->y_tile_minimum);
+
+ /* Display WA #1126: skl,bxt,kbl */
+ if (level >= 1 && level <= 7) {
+ if (wp->y_tiled) {
+ res_blocks +=
+ fixed16_to_u32_round_up(wp->y_tile_minimum);
+ res_lines += wp->y_min_scanlines;
+ } else {
+ res_blocks++;
+ }
- DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
- DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n",
- plane->base.id, plane->name,
- res_blocks, ddb_allocation, res_lines);
- return -EINVAL;
+ /*
+ * Make sure result blocks for higher latency levels are
+ * at least as high as the level below the current level.
+ * Assumption in DDB algorithm optimization for special
+ * cases. Also covers Display WA #1125 for RC.
+ */
+ if (result_prev->plane_res_b > res_blocks)
+ res_blocks = result_prev->plane_res_b;
}
}
/* The number of lines are ignored for the level 0 watermark. */
+ if (level > 0 && res_lines > 31)
+ return;
+
+ /*
+ * If res_lines is valid, assume we can use this watermark level
+ * for now. We'll come back and disable it after we calculate the
+ * DDB allocation if it turns out we don't actually have enough
+ * blocks to satisfy it.
+ */
result->plane_res_b = res_blocks;
result->plane_res_l = res_lines;
result->plane_en = true;
-
- return 0;
}
-static int
+static void
skl_compute_wm_levels(const struct intel_crtc_state *cstate,
const struct intel_plane_state *intel_pstate,
- uint16_t ddb_blocks,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
@@ -4874,34 +4781,24 @@ skl_compute_wm_levels(const struct intel_crtc_state *cstate,
to_i915(intel_pstate->base.plane->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
- int ret;
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- ret = skl_compute_plane_wm(cstate,
- intel_pstate,
- ddb_blocks,
- level,
- wm_params,
- result_prev,
- result);
- if (ret)
- return ret;
+ skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+ result_prev, result);
result_prev = result;
}
-
- return 0;
}
-static uint32_t
+static u32
skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_i915_private *dev_priv = to_i915(state->dev);
uint_fixed_16_16_t linetime_us;
- uint32_t linetime_wm;
+ u32 linetime_wm;
linetime_us = intel_get_linetime_us(cstate);
@@ -4920,14 +4817,13 @@ skl_compute_linetime_wm(const struct intel_crtc_state *cstate)
static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
const struct skl_wm_params *wp,
- struct skl_plane_wm *wm,
- uint16_t ddb_allocation)
+ struct skl_plane_wm *wm)
{
struct drm_device *dev = cstate->base.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- uint16_t trans_min, trans_y_tile_min;
- const uint16_t trans_amount = 10; /* This is configurable amount */
- uint16_t wm0_sel_res_b, trans_offset_b, res_blocks;
+ u16 trans_min, trans_y_tile_min;
+ const u16 trans_amount = 10; /* This is a configurable amount */
+ u16 wm0_sel_res_b, trans_offset_b, res_blocks;
/* Transition WM are not recommended by HW team for GEN9 */
if (INTEL_GEN(dev_priv) <= 9)
@@ -4956,8 +4852,8 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
wm0_sel_res_b = wm->wm[0].plane_res_b - 1;
if (wp->y_tiled) {
- trans_y_tile_min = (uint16_t) mul_round_up_u32_fixed16(2,
- wp->y_tile_minimum);
+ trans_y_tile_min =
+ (u16)mul_round_up_u32_fixed16(2, wp->y_tile_minimum);
res_blocks = max(wm0_sel_res_b, trans_y_tile_min) +
trans_offset_b;
} else {
@@ -4969,12 +4865,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
}
- res_blocks += 1;
-
- if (res_blocks < ddb_allocation) {
- wm->trans_wm.plane_res_b = res_blocks;
- wm->trans_wm.plane_en = true;
- }
+ /*
+ * Just assume we can enable the transition watermark. After
+ * computing the DDB we'll come back and disable it if that
+ * assumption turns out to be false.
+ */
+ wm->trans_wm.plane_res_b = res_blocks + 1;
+ wm->trans_wm.plane_en = true;
}
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
@@ -4982,7 +4879,6 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
enum plane_id plane_id, int color_plane)
{
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
- u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
struct skl_wm_params wm_params;
int ret;
@@ -4991,12 +4887,8 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_compute_wm_levels(crtc_state, plane_state,
- ddb_blocks, &wm_params, wm->wm);
- if (ret)
- return ret;
-
- skl_compute_transition_wm(crtc_state, &wm_params, wm, ddb_blocks);
+ skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+ skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
}
@@ -5006,7 +4898,6 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
enum plane_id plane_id)
{
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
- u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
struct skl_wm_params wm_params;
int ret;
@@ -5018,10 +4909,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- ret = skl_compute_wm_levels(crtc_state, plane_state,
- ddb_blocks, &wm_params, wm->uv_wm);
- if (ret)
- return ret;
+ skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
return 0;
}
@@ -5139,7 +5027,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
i915_reg_t reg,
const struct skl_wm_level *level)
{
- uint32_t val = 0;
+ u32 val = 0;
if (level->plane_en) {
val |= PLANE_WM_EN;
@@ -5251,15 +5139,14 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
+static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
bool *changed /* out */)
{
- struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
- ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
+ ret = skl_build_pipe_wm(cstate, pipe_wm);
if (ret)
return ret;
@@ -5271,15 +5158,15 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
return 0;
}
-static uint32_t
-pipes_modified(struct drm_atomic_state *state)
+static u32
+pipes_modified(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- uint32_t i, ret = 0;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *cstate;
+ u32 i, ret = 0;
- for_each_new_crtc_in_state(state, crtc, cstate, i)
- ret |= drm_crtc_mask(crtc);
+ for_each_new_intel_crtc_in_state(state, crtc, cstate, i)
+ ret |= drm_crtc_mask(&crtc->base);
return ret;
}
@@ -5314,11 +5201,10 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
}
static int
-skl_compute_ddb(struct drm_atomic_state *state)
+skl_compute_ddb(struct intel_atomic_state *state)
{
- const struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
+ const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
@@ -5326,7 +5212,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
- for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
@@ -5372,15 +5258,13 @@ skl_print_wm_changes(struct intel_atomic_state *state)
}
static int
-skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
+skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = state->base.dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- const struct drm_crtc *crtc;
- const struct drm_crtc_state *cstate;
- struct intel_crtc *intel_crtc;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- uint32_t realloc_pipes = pipes_modified(state);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ u32 realloc_pipes = pipes_modified(state);
int ret, i;
/*
@@ -5398,7 +5282,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* since any racing commits that want to update them would need to
* hold _all_ CRTC state mutexes.
*/
- for_each_new_crtc_in_state(state, crtc, cstate, i)
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
(*changed) = true;
if (!*changed)
@@ -5412,20 +5296,20 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
*/
if (dev_priv->wm.distrust_bios_wm) {
ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- state->acquire_ctx);
+ state->base.acquire_ctx);
if (ret)
return ret;
- intel_state->active_pipe_changes = ~0;
+ state->active_pipe_changes = ~0;
/*
- * We usually only initialize intel_state->active_crtcs if we
+ * We usually only initialize state->active_crtcs if we
* we're doing a modeset; make sure this field is always
* initialized during the sanitization process that happens
* on the first commit too.
*/
- if (!intel_state->modeset)
- intel_state->active_crtcs = dev_priv->active_crtcs;
+ if (!state->modeset)
+ state->active_crtcs = dev_priv->active_crtcs;
}
/*
@@ -5441,21 +5325,19 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* any other display updates race with this transaction, so we need
* to grab the lock on *all* CRTC's.
*/
- if (intel_state->active_pipe_changes || intel_state->modeset) {
+ if (state->active_pipe_changes || state->modeset) {
realloc_pipes = ~0;
- intel_state->wm_results.dirty_pipes = ~0;
+ state->wm_results.dirty_pipes = ~0;
}
/*
* We're not recomputing for the pipes not included in the commit, so
* make sure we start with the current state.
*/
- for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
- struct intel_crtc_state *cstate;
-
- cstate = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(cstate))
- return PTR_ERR(cstate);
+ for_each_intel_crtc_mask(dev, crtc, realloc_pipes) {
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
}
return 0;
@@ -5522,12 +5404,12 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
}
static int
-skl_compute_wm(struct drm_atomic_state *state)
+skl_compute_wm(struct intel_atomic_state *state)
{
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct skl_ddb_values *results = &intel_state->wm_results;
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *cstate;
+ struct intel_crtc_state *old_crtc_state;
+ struct skl_ddb_values *results = &state->wm_results;
struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
@@ -5539,47 +5421,35 @@ skl_compute_wm(struct drm_atomic_state *state)
if (ret || !changed)
return ret;
- ret = skl_compute_ddb(state);
- if (ret)
- return ret;
-
/*
* Calculate WM's for all pipes that are part of this transaction.
- * Note that the DDB allocation above may have added more CRTC's that
+ * Note that skl_ddb_add_affected_pipes may have added more CRTC's that
* weren't otherwise being modified (and set bits in dirty_pipes) if
* pipe allocations had to change.
- *
- * FIXME: Now that we're doing this in the atomic check phase, we
- * should allow skl_update_pipe_wm() to return failure in cases where
- * no suitable watermark values can be found.
*/
- for_each_new_crtc_in_state(state, crtc, cstate, i) {
- struct intel_crtc_state *intel_cstate =
- to_intel_crtc_state(cstate);
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ cstate, i) {
const struct skl_pipe_wm *old_pipe_wm =
- &to_intel_crtc_state(crtc->state)->wm.skl.optimal;
+ &old_crtc_state->wm.skl.optimal;
- pipe_wm = &intel_cstate->wm.skl.optimal;
+ pipe_wm = &cstate->wm.skl.optimal;
ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
if (ret)
return ret;
- ret = skl_wm_add_affected_planes(intel_state,
- to_intel_crtc(crtc));
+ ret = skl_wm_add_affected_planes(state, crtc);
if (ret)
return ret;
if (changed)
- results->dirty_pipes |= drm_crtc_mask(crtc);
-
- if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
- /* This pipe's WM's did not change */
- continue;
-
- intel_cstate->update_wm_pre = true;
+ results->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
- skl_print_wm_changes(intel_state);
+ ret = skl_compute_ddb(state);
+ if (ret)
+ return ret;
+
+ skl_print_wm_changes(state);
return 0;
}
@@ -5617,13 +5487,13 @@ static void skl_initial_wm(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void ilk_compute_wm_config(struct drm_device *dev,
+static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
struct intel_crtc *crtc;
/* Compute the currently _active_ config */
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
if (!wm->pipe_enabled)
@@ -5637,25 +5507,24 @@ static void ilk_compute_wm_config(struct drm_device *dev,
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = &dev_priv->drm;
struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
struct ilk_wm_maximums max;
struct intel_wm_config config = {};
struct ilk_wm_values results = {};
enum intel_ddb_partitioning partitioning;
- ilk_compute_wm_config(dev, &config);
+ ilk_compute_wm_config(dev_priv, &config);
- ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
- ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_1_2, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_1_2);
/* 5/6 split only in single pipe config on IVB+ */
if (INTEL_GEN(dev_priv) >= 7 &&
config.num_pipes_active == 1 && config.sprites_enabled) {
- ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
- ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
+ ilk_compute_wm_maximums(dev_priv, 1, &config, INTEL_DDB_PART_5_6, &max);
+ ilk_wm_merge(dev_priv, &config, &max, &lp_wm_5_6);
- best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
+ best_lp_wm = ilk_find_best_result(dev_priv, &lp_wm_1_2, &lp_wm_5_6);
} else {
best_lp_wm = &lp_wm_1_2;
}
@@ -5663,7 +5532,7 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
partitioning = (best_lp_wm == &lp_wm_1_2) ?
INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
- ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
+ ilk_compute_wm_results(dev_priv, best_lp_wm, partitioning, &results);
ilk_write_wm_values(dev_priv, &results);
}
@@ -5694,7 +5563,7 @@ static void ilk_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static inline void skl_wm_level_from_reg_val(uint32_t val,
+static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
@@ -5703,19 +5572,18 @@ static inline void skl_wm_level_from_reg_val(uint32_t val,
PLANE_WM_LINES_MASK;
}
-void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum pipe pipe = intel_crtc->pipe;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
int level, max_level;
enum plane_id plane_id;
- uint32_t val;
+ u32 val;
max_level = ilk_wm_max_level(dev_priv);
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm = &out->planes[plane_id];
for (level = 0; level <= max_level; level++) {
@@ -5735,30 +5603,27 @@ void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
skl_wm_level_from_reg_val(val, &wm->trans_wm);
}
- if (!intel_crtc->active)
+ if (!crtc->active)
return;
out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
-void skl_wm_get_hw_state(struct drm_device *dev)
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc *crtc;
struct intel_crtc_state *cstate;
skl_ddb_get_hw_state(dev_priv, ddb);
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- intel_crtc = to_intel_crtc(crtc);
- cstate = to_intel_crtc_state(crtc->state);
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ cstate = to_intel_crtc_state(crtc->base.state);
skl_pipe_wm_get_hw_state(crtc, &cstate->wm.skl.optimal);
- if (intel_crtc->active)
- hw->dirty_pipes |= drm_crtc_mask(crtc);
+ if (crtc->active)
+ hw->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
if (dev_priv->active_crtcs) {
@@ -5767,15 +5632,14 @@ void skl_wm_get_hw_state(struct drm_device *dev)
}
}
-static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
+ struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->base.state);
struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
- enum pipe pipe = intel_crtc->pipe;
+ enum pipe pipe = crtc->pipe;
static const i915_reg_t wm0_pipe_reg[] = {
[PIPE_A] = WM0_PIPEA_ILK,
[PIPE_B] = WM0_PIPEB_ILK,
@@ -5788,7 +5652,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
memset(active, 0, sizeof(*active));
- active->pipe_enabled = intel_crtc->active;
+ active->pipe_enabled = crtc->active;
if (active->pipe_enabled) {
u32 tmp = hw->wm_pipe[pipe];
@@ -5816,7 +5680,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
active->wm[level].enable = true;
}
- intel_crtc->wm.active.ilk = *active;
+ crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
@@ -5827,7 +5691,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
static void g4x_read_wm_values(struct drm_i915_private *dev_priv,
struct g4x_wm_values *wm)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(DSPFW1);
wm->sr.plane = _FW_WM(tmp, SR);
@@ -5854,7 +5718,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
enum pipe pipe;
- uint32_t tmp;
+ u32 tmp;
for_each_pipe(dev_priv, pipe) {
tmp = I915_READ(VLV_DDL(pipe));
@@ -5926,9 +5790,8 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
#undef _FW_WM
#undef _FW_WM_VLV
-void g4x_wm_get_hw_state(struct drm_device *dev)
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct g4x_wm_values *wm = &dev_priv->wm.g4x;
struct intel_crtc *crtc;
@@ -5936,7 +5799,7 @@ void g4x_wm_get_hw_state(struct drm_device *dev)
wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct g4x_wm_state *active = &crtc->wm.active.g4x;
@@ -6067,9 +5930,8 @@ void g4x_wm_sanitize(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-void vlv_wm_get_hw_state(struct drm_device *dev)
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_values *wm = &dev_priv->wm.vlv;
struct intel_crtc *crtc;
u32 val;
@@ -6113,7 +5975,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
mutex_unlock(&dev_priv->pcu_lock);
}
- for_each_intel_crtc(dev, crtc) {
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
struct vlv_wm_state *active = &crtc->wm.active.vlv;
@@ -6230,15 +6092,14 @@ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
*/
}
-void ilk_wm_get_hw_state(struct drm_device *dev)
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct ilk_wm_values *hw = &dev_priv->wm.hw;
- struct drm_crtc *crtc;
+ struct intel_crtc *crtc;
ilk_init_lp_watermarks(dev_priv);
- for_each_crtc(dev, crtc)
+ for_each_intel_crtc(&dev_priv->drm, crtc)
ilk_pipe_wm_get_hw_state(crtc);
hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
@@ -6339,10 +6200,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
*/
DEFINE_SPINLOCK(mchdev_lock);
-/* Global for IPS driver to get at the current i915 device. Protected by
- * mchdev_lock. */
-static struct drm_i915_private *i915_mch_dev;
-
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
u16 rgvswctl;
@@ -6805,7 +6662,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (!rps->enabled)
return;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
+ if (i915_request_signaled(rq))
return;
/* Serializes with i915_request_retire() */
@@ -7049,7 +6906,7 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
- if (IS_GEN9(dev_priv))
+ if (IS_GEN(dev_priv, 9))
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq));
@@ -7285,9 +7142,9 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
- if (IS_GEN6(dev_priv) && ret) {
+ if (IS_GEN(dev_priv, 6) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
- } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
+ } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
@@ -7412,7 +7269,7 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
- switch (INTEL_INFO(dev_priv)->sseu.eu_total) {
+ switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) {
case 8:
/* (2 * 4) config */
rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -7985,16 +7842,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
-
- val = __i915_chipset_val(dev_priv);
-
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_chipset_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
return val;
}
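The wakeref conversions in these helpers lean on the with_intel_runtime_pm() macro introduced earlier in this series. As a rough sketch of the idea (not the exact macro text; see intel_drv.h in this series for the real definition), it is a for-loop wrapper that grabs a runtime-PM wakeref before the body and releases it afterwards:

/*
 * Approximate shape of the helper; the loop body runs exactly once
 * while the wakeref is held, then the wakeref is dropped.
 */
#define with_intel_runtime_pm(i915, wf) \
	for ((wf) = intel_runtime_pm_get(i915); (wf); \
	     intel_runtime_pm_put((i915), (wf)), (wf) = 0)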
@@ -8071,14 +7929,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
- if (!IS_GEN5(dev_priv))
- return;
+ intel_wakeref_t wakeref;
- spin_lock_irq(&mchdev_lock);
-
- __i915_update_gfx_val(dev_priv);
+ if (!IS_GEN(dev_priv, 5))
+ return;
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ __i915_update_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
@@ -8120,18 +7980,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
- unsigned long val;
+ intel_wakeref_t wakeref;
+ unsigned long val = 0;
- if (!IS_GEN5(dev_priv))
+ if (!IS_GEN(dev_priv, 5))
return 0;
- spin_lock_irq(&mchdev_lock);
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ val = __i915_gfx_val(dev_priv);
+ spin_unlock_irq(&mchdev_lock);
+ }
- val = __i915_gfx_val(dev_priv);
+ return val;
+}
- spin_unlock_irq(&mchdev_lock);
+static struct drm_i915_private *i915_mch_dev;
- return val;
+static struct drm_i915_private *mchdev_get(void)
+{
+ struct drm_i915_private *i915;
+
+ rcu_read_lock();
+ i915 = i915_mch_dev;
+ if (!kref_get_unless_zero(&i915->drm.ref))
+ i915 = NULL;
+ rcu_read_unlock();
+
+ return i915;
}
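Every exported IPS entry point below now follows the same shape around this helper; condensed into one hypothetical hook (the name and body are illustrative only, not part of the patch):

/* Illustrative consumer pattern; i915_gpu_example_hook() is not real code. */
static bool i915_gpu_example_hook(void)
{
	struct drm_i915_private *i915;

	i915 = mchdev_get();            /* take a reference, or bail out */
	if (!i915)
		return false;

	spin_lock_irq(&mchdev_lock);
	/* ... poke at i915->ips state here ... */
	spin_unlock_irq(&mchdev_lock);

	drm_dev_put(&i915->drm);        /* drop the reference from mchdev_get() */
	return true;
}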
/**
@@ -8142,23 +8018,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
*/
unsigned long i915_read_mch_val(void)
{
- struct drm_i915_private *dev_priv;
- unsigned long chipset_val, graphics_val, ret = 0;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev)
- goto out_unlock;
- dev_priv = i915_mch_dev;
-
- chipset_val = __i915_chipset_val(dev_priv);
- graphics_val = __i915_gfx_val(dev_priv);
+ struct drm_i915_private *i915;
+ unsigned long chipset_val = 0;
+ unsigned long graphics_val = 0;
+ intel_wakeref_t wakeref;
- ret = chipset_val + graphics_val;
+ i915 = mchdev_get();
+ if (!i915)
+ return 0;
-out_unlock:
- spin_unlock_irq(&mchdev_lock);
+ with_intel_runtime_pm(i915, wakeref) {
+ spin_lock_irq(&mchdev_lock);
+ chipset_val = __i915_chipset_val(i915);
+ graphics_val = __i915_gfx_val(i915);
+ spin_unlock_irq(&mchdev_lock);
+ }
- return ret;
+ drm_dev_put(&i915->drm);
+ return chipset_val + graphics_val;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
@@ -8169,23 +8046,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
*/
bool i915_gpu_raise(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
+ struct drm_i915_private *i915;
- if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
- dev_priv->ips.max_delay--;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay > i915->ips.fmax)
+ i915->ips.max_delay--;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
@@ -8197,23 +8070,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
*/
bool i915_gpu_lower(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
+ struct drm_i915_private *i915;
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
- dev_priv->ips.max_delay++;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ if (i915->ips.max_delay < i915->ips.min_delay)
+ i915->ips.max_delay++;
spin_unlock_irq(&mchdev_lock);
- return ret;
+ drm_dev_put(&i915->drm);
+ return true;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
@@ -8224,13 +8093,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
*/
bool i915_gpu_busy(void)
{
- bool ret = false;
+ struct drm_i915_private *i915;
+ bool ret;
- spin_lock_irq(&mchdev_lock);
- if (i915_mch_dev)
- ret = i915_mch_dev->gt.awake;
- spin_unlock_irq(&mchdev_lock);
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
+ ret = i915->gt.awake;
+
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
@@ -8243,24 +8115,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
*/
bool i915_gpu_turbo_disable(void)
{
- struct drm_i915_private *dev_priv;
- bool ret = true;
-
- spin_lock_irq(&mchdev_lock);
- if (!i915_mch_dev) {
- ret = false;
- goto out_unlock;
- }
- dev_priv = i915_mch_dev;
-
- dev_priv->ips.max_delay = dev_priv->ips.fstart;
+ struct drm_i915_private *i915;
+ bool ret;
- if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
- ret = false;
+ i915 = mchdev_get();
+ if (!i915)
+ return false;
-out_unlock:
+ spin_lock_irq(&mchdev_lock);
+ i915->ips.max_delay = i915->ips.fstart;
+ ret = ironlake_set_drps(i915, i915->ips.fstart);
spin_unlock_irq(&mchdev_lock);
+ drm_dev_put(&i915->drm);
return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
@@ -8289,18 +8156,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
/* We only register the i915 ips part with intel-ips once everything is
* set up, to avoid intel-ips sneaking in and reading bogus values. */
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = dev_priv;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, dev_priv);
ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
- spin_lock_irq(&mchdev_lock);
- i915_mch_dev = NULL;
- spin_unlock_irq(&mchdev_lock);
+ rcu_assign_pointer(i915_mch_dev, NULL);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
@@ -8410,7 +8273,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_freq_opcode(dev_priv, 450));
/* After setting max-softlimit, find the overclock max freq */
- if (IS_GEN6(dev_priv) ||
+ if (IS_GEN(dev_priv, 6) ||
IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
u32 params = 0;
@@ -8639,7 +8502,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv)
static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
/*
* Required for FBC
@@ -8711,7 +8574,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
int pipe;
- uint32_t val;
+ u32 val;
/*
* On Ibex Peak and Cougar Point, we need to disable clock
@@ -8746,7 +8609,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
{
- uint32_t tmp;
+ u32 tmp;
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
@@ -8756,7 +8619,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
+ u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -8850,7 +8713,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
- uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
+ u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
/*
* WaVSThreadDispatchOverride:ivb,vlv
@@ -8886,7 +8749,7 @@ static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
static void lpt_suspend_hw(struct drm_i915_private *dev_priv)
{
if (HAS_PCH_LPT_LP(dev_priv)) {
- uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ u32 val = I915_READ(SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
@@ -9124,7 +8987,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t snpcr;
+ u32 snpcr;
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
@@ -9333,7 +9196,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv)
static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
{
- uint32_t dspclk_gate;
+ u32 dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
@@ -9480,9 +9343,9 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = ivb_init_clock_gating;
else if (IS_VALLEYVIEW(dev_priv))
dev_priv->display.init_clock_gating = vlv_init_clock_gating;
- else if (IS_GEN6(dev_priv))
+ else if (IS_GEN(dev_priv, 6))
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
dev_priv->display.init_clock_gating = ilk_init_clock_gating;
else if (IS_G4X(dev_priv))
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
@@ -9490,11 +9353,11 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
else if (IS_I965G(dev_priv))
dev_priv->display.init_clock_gating = i965g_init_clock_gating;
- else if (IS_GEN3(dev_priv))
+ else if (IS_GEN(dev_priv, 3))
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
dev_priv->display.init_clock_gating = i85x_init_clock_gating;
- else if (IS_GEN2(dev_priv))
+ else if (IS_GEN(dev_priv, 2))
dev_priv->display.init_clock_gating = i830_init_clock_gating;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
@@ -9508,7 +9371,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For cxsr */
if (IS_PINEVIEW(dev_priv))
i915_pineview_get_mem_freq(dev_priv);
- else if (IS_GEN5(dev_priv))
+ else if (IS_GEN(dev_priv, 5))
i915_ironlake_get_mem_freq(dev_priv);
/* For FIFO watermark updates */
@@ -9520,9 +9383,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
- if ((IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[1] &&
+ if ((IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[1] &&
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
- (!IS_GEN5(dev_priv) && dev_priv->wm.pri_latency[0] &&
+ (!IS_GEN(dev_priv, 5) && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
dev_priv->display.compute_intermediate_wm =
@@ -9563,12 +9426,12 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.update_wm = NULL;
} else
dev_priv->display.update_wm = pineview_update_wm;
- } else if (IS_GEN4(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 4)) {
dev_priv->display.update_wm = i965_update_wm;
- } else if (IS_GEN3(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 3)) {
dev_priv->display.update_wm = i9xx_update_wm;
dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
- } else if (IS_GEN2(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 2)) {
if (INTEL_INFO(dev_priv)->num_pipes == 1) {
dev_priv->display.update_wm = i845_update_wm;
dev_priv->display.get_fifo_size = i845_get_fifo_size;
@@ -9583,7 +9446,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
- uint32_t flags =
+ u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {
@@ -9606,7 +9469,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
- uint32_t flags =
+ u32 flags =
I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
switch (flags) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 419e56342523..8dbf26c212cc 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -51,7 +51,6 @@
* must be correctly synchronized/cancelled when shutting down the pipe."
*/
-#include <drm/drmP.h>
#include "intel_drv.h"
#include "i915_drv.h"
@@ -231,7 +230,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
{
- uint8_t dprx = 0;
+ u8 dprx = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
&dprx) != 1)
@@ -241,7 +240,7 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
{
- uint8_t alpm_caps = 0;
+ u8 alpm_caps = 0;
if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
&alpm_caps) != 1)
@@ -261,6 +260,32 @@ static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
return val;
}
+static u16 intel_dp_get_su_x_granulartiy(struct intel_dp *intel_dp)
+{
+ u16 val;
+ ssize_t r;
+
+ /*
+ * Return the default X granularity if granularity is not required or
+ * if the DPCD read fails
+ */
+ if (!(intel_dp->psr_dpcd[1] & DP_PSR2_SU_GRANULARITY_REQUIRED))
+ return 4;
+
+ r = drm_dp_dpcd_read(&intel_dp->aux, DP_PSR2_SU_X_GRANULARITY, &val, 2);
+ if (r != 2)
+ DRM_DEBUG_KMS("Unable to read DP_PSR2_SU_X_GRANULARITY\n");
+
+ /*
+ * Spec says that if the value read is 0 the default granularity should
+ * be used instead.
+ */
+ if (r != 2 || val == 0)
+ val = 4;
+
+ return val;
+}
+
void intel_psr_init_dpcd(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv =
@@ -274,10 +299,16 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
+ if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
+ DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+ return;
+ }
+
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
return;
}
+
dev_priv->psr.sink_support = true;
dev_priv->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
@@ -309,6 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
intel_dp_get_colorimetry_status(intel_dp);
+ dev_priv->psr.su_x_granularity =
+ intel_dp_get_su_x_granulartiy(intel_dp);
}
}
}
@@ -351,7 +384,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 aux_clock_divider, aux_ctl;
int i;
- static const uint8_t aux_msg[] = {
+ static const u8 aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
[2] = DP_SET_POWER & 0xff,
@@ -388,13 +421,15 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
if (dev_priv->psr.psr2_enabled) {
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE);
- dpcd_val |= DP_PSR_ENABLE_PSR2;
+ dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
+ } else {
+ if (dev_priv->psr.link_standby)
+ dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
}
- if (dev_priv->psr.link_standby)
- dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
- dpcd_val |= DP_PSR_CRC_VERIFICATION;
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -468,9 +503,6 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
- /* FIXME: selective update is probably totally broken because it doesn't
- * mesh at all with our frontbuffer tracking. And the hw alone isn't
- * good enough. */
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
val |= EDP_Y_COORDINATE_ENABLE;
@@ -519,7 +551,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
psr_max_h = 4096;
psr_max_v = 2304;
- } else if (IS_GEN9(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 9)) {
psr_max_h = 3640;
psr_max_v = 2304;
}
@@ -531,6 +563,18 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ /*
+ * HW sends SU blocks of size four scan lines, which means the starting
+ * X coordinate and Y granularity requirements will always be met. We
+ * only need to validate the SU block width is a multiple of
+ * x granularity.
+ */
+ if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+ DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+ crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ return false;
+ }
+
return true;
}
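A quick arithmetic check of the new constraint (the mode widths are only examples): with the default granularity of 4, a 1920-wide mode passes (1920 % 4 == 0) while an odd panel width such as 1366 is rejected (1366 % 4 == 2); a sink reporting a granularity of 16 likewise accepts 1920 (1920 % 16 == 0) but not 1366 (1366 % 16 == 6), so PSR2 is simply not enabled for that mode.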
@@ -641,17 +685,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_psr_setup_aux(intel_dp);
- if (dev_priv->psr.psr2_enabled) {
+ if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+ !IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = gen9_chicken_trans_reg(dev_priv,
cpu_transcoder);
u32 chicken = I915_READ(reg);
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv))
- chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
- | PSR2_ADD_VERTICAL_LINE_COUNT);
-
- else
- chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
+ chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
+ PSR2_ADD_VERTICAL_LINE_COUNT;
I915_WRITE(reg, chicken);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fbeaec3994e7..e39e483d8d16 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -29,7 +29,6 @@
#include <linux/log2.h>
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -133,7 +132,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE) {
cmd |= MI_EXE_FLUSH;
- if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
+ if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
cmd |= MI_INVALIDATE_ISP;
}
@@ -217,7 +216,7 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
* really our business. That leaves only stall at scoreboard.
*/
static int
-intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
+gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
{
u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
u32 *cs;
@@ -257,7 +256,7 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
- ret = intel_emit_post_sync_nonzero_flush(rq);
+ ret = gen6_emit_post_sync_nonzero_flush(rq);
if (ret)
return ret;
@@ -300,6 +299,37 @@ gen6_render_ring_flush(struct i915_request *rq, u32 mode)
return 0;
}
+static void gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+
+ /* Finally we can flush and with it emit the breadcrumb */
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = rq->global_seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+}
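+/* 14 dwords: three PIPE_CONTROL(4) packets plus MI_USER_INTERRUPT and MI_NOOP */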
+static const int gen6_rcs_emit_breadcrumb_sz = 14;
+
static int
gen7_render_ring_cs_stall_wa(struct i915_request *rq)
{
@@ -379,11 +409,86 @@ gen7_render_ring_flush(struct i915_request *rq, u32 mode)
return 0;
}
-static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+static void gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
+ PIPE_CONTROL_DEPTH_CACHE_FLUSH |
+ PIPE_CONTROL_DC_FLUSH_ENABLE |
+ PIPE_CONTROL_FLUSH_ENABLE |
+ PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL);
+ *cs++ = intel_hws_seqno_address(rq->engine);
+ *cs++ = rq->global_seqno;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+}
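+/* 6 dwords: one PIPE_CONTROL(4) packet plus MI_USER_INTERRUPT and MI_NOOP */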
+static const int gen7_rcs_emit_breadcrumb_sz = 6;
+
+static void gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->global_seqno;
+ *cs++ = MI_USER_INTERRUPT;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen6_xcs_emit_breadcrumb_sz = 4;
+
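+/*
+ * The extra seqno stores below appear to take over from the removed
+ * CPU-side gen6_seqno_barrier: they keep the CS busy long enough for
+ * the seqno write to be posted before the user interrupt is raised.
+ */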
+#define GEN7_XCS_WA 32
+static void gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
+{
+ int i;
+
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
+ *cs++ = rq->global_seqno;
+
+ for (i = 0; i < GEN7_XCS_WA; i++) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX_ADDR;
+ *cs++ = rq->global_seqno;
+ }
+
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
+}
+static const int gen7_xcs_emit_breadcrumb_sz = 8 + GEN7_XCS_WA * 3;
+#undef GEN7_XCS_WA
+
+static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
+{
+ /*
+ * Keep the render interrupt unmasked as this papers over
+ * lost interrupts following a reset.
+ */
+ if (engine->class == RENDER_CLASS) {
+ if (INTEL_GEN(engine->i915) >= 6)
+ mask &= ~BIT(0);
+ else
+ mask &= ~I915_USER_INTERRUPT;
+ }
+
+ intel_engine_set_hwsp_writemask(engine, mask);
+}
+
+static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
struct drm_i915_private *dev_priv = engine->i915;
- struct page *page = virt_to_page(engine->status_page.page_addr);
- phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
u32 addr;
addr = lower_32_bits(phys);
@@ -393,15 +498,25 @@ static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
I915_WRITE(HWS_PGA, addr);
}
-static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
+static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
+{
+ struct page *page = virt_to_page(engine->status_page.page_addr);
+ phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
+
+ set_hws_pga(engine, phys);
+ set_hwstam(engine, ~0u);
+}
+
+static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t mmio;
+ i915_reg_t hwsp;
- /* The ring status page addresses are no longer next to the rest of
+ /*
+ * The ring status page addresses are no longer next to the rest of
* the ring registers as of gen7.
*/
- if (IS_GEN7(dev_priv)) {
+ if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
/*
* No more rings exist on Gen7. Default case is only to shut up
@@ -410,56 +525,55 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
default:
GEM_BUG_ON(engine->id);
case RCS:
- mmio = RENDER_HWS_PGA_GEN7;
+ hwsp = RENDER_HWS_PGA_GEN7;
break;
case BCS:
- mmio = BLT_HWS_PGA_GEN7;
+ hwsp = BLT_HWS_PGA_GEN7;
break;
case VCS:
- mmio = BSD_HWS_PGA_GEN7;
+ hwsp = BSD_HWS_PGA_GEN7;
break;
case VECS:
- mmio = VEBOX_HWS_PGA_GEN7;
+ hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
- } else if (IS_GEN6(dev_priv)) {
- mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
+ } else if (IS_GEN(dev_priv, 6)) {
+ hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
} else {
- mmio = RING_HWS_PGA(engine->mmio_base);
+ hwsp = RING_HWS_PGA(engine->mmio_base);
}
- if (INTEL_GEN(dev_priv) >= 6) {
- u32 mask = ~0u;
+ I915_WRITE(hwsp, offset);
+ POSTING_READ(hwsp);
+}
- /*
- * Keep the render interrupt unmasked as this papers over
- * lost interrupts following a reset.
- */
- if (engine->id == RCS)
- mask &= ~BIT(0);
+static void flush_cs_tlb(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
- I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
- }
+ if (!IS_GEN_RANGE(dev_priv, 6, 7))
+ return;
- I915_WRITE(mmio, engine->status_page.ggtt_offset);
- POSTING_READ(mmio);
+ /* ring should be idle before issuing a sync flush */
+ WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
- /* Flush the TLB for this page */
- if (IS_GEN(dev_priv, 6, 7)) {
- i915_reg_t reg = RING_INSTPM(engine->mmio_base);
+ I915_WRITE(instpm,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (intel_wait_for_register(dev_priv,
+ instpm, INSTPM_SYNC_FLUSH, 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ engine->name);
+}
- /* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+static void ring_setup_status_page(struct intel_engine_cs *engine)
+{
+ set_hwsp(engine, engine->status_page.ggtt_offset);
+ set_hwstam(engine, ~0u);
- I915_WRITE(reg,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(dev_priv,
- reg, INSTPM_SYNC_FLUSH, 0,
- 1000))
- DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
- engine->name);
- }
+ flush_cs_tlb(engine);
}
static bool stop_ring(struct intel_engine_cs *engine)
@@ -529,17 +643,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
if (HWS_NEEDS_PHYSICAL(dev_priv))
ring_setup_phys_status_page(engine);
else
- intel_ring_setup_status_page(engine);
+ ring_setup_status_page(engine);
intel_engine_reset_breadcrumbs(engine);
- if (HAS_LEGACY_SEMAPHORES(engine->i915)) {
- I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
- I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
- if (HAS_VEBOX(dev_priv))
- I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
- }
-
/* Enforce ordering by reading HEAD register back */
I915_READ_HEAD(engine);
@@ -603,10 +710,6 @@ out:
static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
{
intel_engine_stop_cs(engine);
-
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
return i915_gem_find_active_request(engine);
}
@@ -679,7 +782,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
return ret;
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN(dev_priv, 4, 6))
+ if (IS_GEN_RANGE(dev_priv, 4, 6))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
/* We need to disable the AsyncFlip performance optimisations in order
@@ -688,22 +791,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
*
* WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
*/
- if (IS_GEN(dev_priv, 6, 7))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
/* Required for the hardware to program scanline values for waiting */
/* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN7(dev_priv))
+ if (IS_GEN(dev_priv, 7))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
/* From the Sandybridge PRM, volume 1 part 3, page 24:
* "If this bit is set, STCunit will have LRA as replacement
* policy. [...] This bit must be reset. LRA replacement
@@ -713,7 +816,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
_MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
}
- if (IS_GEN(dev_priv, 6, 7))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_GEN(dev_priv) >= 6)
@@ -722,33 +825,6 @@ static int init_render_ring(struct intel_engine_cs *engine)
return 0;
}
-static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
-{
- struct drm_i915_private *dev_priv = rq->i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int num_rings = 0;
-
- for_each_engine(engine, dev_priv, id) {
- i915_reg_t mbox_reg;
-
- if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
- continue;
-
- mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
- if (i915_mmio_reg_valid(mbox_reg)) {
- *cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(mbox_reg);
- *cs++ = rq->global_seqno;
- num_rings++;
- }
- }
- if (num_rings & 1)
- *cs++ = MI_NOOP;
-
- return cs;
-}
-
static void cancel_requests(struct intel_engine_cs *engine)
{
struct i915_request *request;
@@ -760,8 +836,7 @@ static void cancel_requests(struct intel_engine_cs *engine)
list_for_each_entry(request, &engine->timeline.requests, link) {
GEM_BUG_ON(!request->global_seqno);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &request->fence.flags))
+ if (i915_request_signaled(request))
continue;
dma_fence_set_error(&request->fence, -EIO);
@@ -788,92 +863,41 @@ static void i9xx_submit_request(struct i915_request *request)
static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
+ *cs++ = MI_FLUSH;
+
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *cs++ = I915_GEM_HWS_INDEX_ADDR;
*cs++ = rq->global_seqno;
+
*cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
rq->tail = intel_ring_offset(rq, cs);
assert_ring_tail_valid(rq->ring, rq->tail);
}
+static const int i9xx_emit_breadcrumb_sz = 6;
-static const int i9xx_emit_breadcrumb_sz = 4;
-
-static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
-{
- return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
-}
-
-static int
-gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
+#define GEN5_WA_STORES 8 /* must be at least 1! */
+static void gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
{
- u32 dw1 = MI_SEMAPHORE_MBOX |
- MI_SEMAPHORE_COMPARE |
- MI_SEMAPHORE_REGISTER;
- u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
- u32 *cs;
-
- WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- *cs++ = dw1 | wait_mbox;
- /* Throughout all of the GEM code, seqno passed implies our current
- * seqno is >= the last seqno executed. However for hardware the
- * comparison is strictly greater than.
- */
- *cs++ = signal->global_seqno - 1;
- *cs++ = 0;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
+ int i;
- return 0;
-}
+ *cs++ = MI_FLUSH;
-static void
-gen5_seqno_barrier(struct intel_engine_cs *engine)
-{
- /* MI_STORE are internally buffered by the GPU and not flushed
- * either by MI_FLUSH or SyncFlush or any other combination of
- * MI commands.
- *
- * "Only the submission of the store operation is guaranteed.
- * The write result will be complete (coherent) some time later
- * (this is practically a finite period but there is no guaranteed
- * latency)."
- *
- * Empirically, we observe that we need a delay of at least 75us to
- * be sure that the seqno write is visible by the CPU.
- */
- usleep_range(125, 250);
-}
+ BUILD_BUG_ON(GEN5_WA_STORES < 1);
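+ /*
+ * Repeating the store appears to stand in for the fixed delay the
+ * removed gen5_seqno_barrier relied on to make the seqno write
+ * visible before the interrupt is serviced.
+ */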
+ for (i = 0; i < GEN5_WA_STORES; i++) {
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX_ADDR;
+ *cs++ = rq->global_seqno;
+ }
-static void
-gen6_seqno_barrier(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
+ *cs++ = MI_USER_INTERRUPT;
- /* Workaround to force correct ordering between irq and seqno writes on
- * ivb (and maybe also on snb) by reading from a CS register (like
- * ACTHD) before reading the status page.
- *
- * Note that this effectively stalls the read by the time it takes to
- * do a memory transaction, which more or less ensures that the write
- * from the GPU has sufficient time to invalidate the CPU cacheline.
- * Alternatively we could delay the interrupt from the CS ring to give
- * the write time to land, but that would incur a delay after every
- * batch i.e. much more frequent than a delay when waiting for the
- * interrupt (with the same net latency).
- *
- * Also note that to prevent whole machine hangs on gen7, we have to
- * take the spinlock to guard against concurrent cacheline access.
- */
- spin_lock_irq(&dev_priv->uncore.lock);
- POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
- spin_unlock_irq(&dev_priv->uncore.lock);
+ rq->tail = intel_ring_offset(rq, cs);
+ assert_ring_tail_valid(rq->ring, rq->tail);
}
+static const int gen5_emit_breadcrumb_sz = GEN5_WA_STORES * 3 + 2;
+#undef GEN5_WA_STORES
static void
gen5_irq_enable(struct intel_engine_cs *engine)
@@ -948,6 +972,10 @@ gen6_irq_enable(struct intel_engine_cs *engine)
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask |
engine->irq_keep_mask));
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
@@ -966,6 +994,10 @@ hsw_vebox_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+
+ /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
+
gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
}
@@ -1581,10 +1613,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct intel_engine_cs *engine = rq->engine;
enum intel_engine_id id;
const int num_rings =
- /* Use an extended w/a on gen7 if signalling from other rings */
- (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
- INTEL_INFO(i915)->num_rings - 1 :
- 0;
+ IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
@@ -1597,7 +1626,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
- if (IS_GEN7(i915))
+ if (IS_GEN(i915, 7))
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
@@ -1611,7 +1640,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (IS_GEN7(i915)) {
+ if (IS_GEN(i915, 7)) {
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_rings) {
struct intel_engine_cs *signaller;
@@ -1658,7 +1687,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*/
*cs++ = MI_NOOP;
- if (IS_GEN7(i915)) {
+ if (IS_GEN(i915, 7)) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
@@ -1829,17 +1858,19 @@ static int ring_request_alloc(struct i915_request *request)
GEM_BUG_ON(!request->hw_context->pin_count);
- /* Flush enough space to reduce the likelihood of waiting after
+ /*
+ * Flush enough space to reduce the likelihood of waiting after
* we start building the request - in which case we will just
* have to repeat work.
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+ ret = switch_context(request);
if (ret)
return ret;
- ret = switch_context(request);
+ /* Unconditionally invalidate GPU caches and TLBs. */
+ ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
if (ret)
return ret;
@@ -1881,22 +1912,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
return 0;
}
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
-{
- GEM_BUG_ON(bytes > ring->effective_size);
- if (unlikely(bytes > ring->effective_size - ring->emit))
- bytes += ring->size - ring->emit;
-
- if (unlikely(bytes > ring->space)) {
- int ret = wait_for_space(ring, bytes);
- if (unlikely(ret))
- return ret;
- }
-
- GEM_BUG_ON(ring->space < bytes);
- return 0;
-}
-
u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
{
struct intel_ring *ring = rq->ring;
@@ -2129,77 +2144,15 @@ static int gen6_ring_flush(struct i915_request *rq, u32 mode)
return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
}
-static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
-{
- int i;
-
- if (!HAS_LEGACY_SEMAPHORES(dev_priv))
- return;
-
- GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
- engine->semaphore.sync_to = gen6_ring_sync_to;
- engine->semaphore.signal = gen6_signal;
-
- /*
- * The current semaphore is only applied on pre-gen8
- * platform. And there is no VCS2 ring on the pre-gen8
- * platform. So the semaphore between RCS and VCS2 is
- * initialized as INVALID.
- */
- for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
- static const struct {
- u32 wait_mbox;
- i915_reg_t mbox_reg;
- } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
- [RCS_HW] = {
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
- },
- [VCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
- },
- [BCS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
- [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
- },
- [VECS_HW] = {
- [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
- [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
- [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
- },
- };
- u32 wait_mbox;
- i915_reg_t mbox_reg;
-
- if (i == engine->hw_id) {
- wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
- mbox_reg = GEN6_NOSYNC;
- } else {
- wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
- mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
- }
-
- engine->semaphore.mbox.wait[i] = wait_mbox;
- engine->semaphore.mbox.signal[i] = mbox_reg;
- }
-}
-
static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
if (INTEL_GEN(dev_priv) >= 6) {
engine->irq_enable = gen6_irq_enable;
engine->irq_disable = gen6_irq_disable;
- engine->irq_seqno_barrier = gen6_seqno_barrier;
} else if (INTEL_GEN(dev_priv) >= 5) {
engine->irq_enable = gen5_irq_enable;
engine->irq_disable = gen5_irq_disable;
- engine->irq_seqno_barrier = gen5_seqno_barrier;
} else if (INTEL_GEN(dev_priv) >= 3) {
engine->irq_enable = i9xx_irq_enable;
engine->irq_disable = i9xx_irq_disable;
@@ -2231,7 +2184,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
intel_ring_init_irq(dev_priv, engine);
- intel_ring_init_semaphores(dev_priv, engine);
engine->init_hw = init_ring_common;
engine->reset.prepare = reset_prepare;
@@ -2243,15 +2195,9 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb = i9xx_emit_breadcrumb;
engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
- if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
- int num_rings;
-
- engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
-
- num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
- engine->emit_breadcrumb_sz += num_rings * 3;
- if (num_rings & 1)
- engine->emit_breadcrumb_sz++;
+ if (IS_GEN(dev_priv, 5)) {
+ engine->emit_breadcrumb = gen5_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen5_emit_breadcrumb_sz;
}
engine->set_default_submission = i9xx_set_default_submission;
@@ -2278,12 +2224,17 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
- if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(dev_priv) >= 7) {
engine->init_context = intel_rcs_ctx_init;
engine->emit_flush = gen7_render_ring_flush;
- if (IS_GEN6(dev_priv))
- engine->emit_flush = gen6_render_ring_flush;
- } else if (IS_GEN5(dev_priv)) {
+ engine->emit_breadcrumb = gen7_rcs_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen7_rcs_emit_breadcrumb_sz;
+ } else if (IS_GEN(dev_priv, 6)) {
+ engine->init_context = intel_rcs_ctx_init;
+ engine->emit_flush = gen6_render_ring_flush;
+ engine->emit_breadcrumb = gen6_rcs_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen6_rcs_emit_breadcrumb_sz;
+ } else if (IS_GEN(dev_priv, 5)) {
engine->emit_flush = gen4_render_ring_flush;
} else {
if (INTEL_GEN(dev_priv) < 4)
@@ -2313,13 +2264,21 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+
+ if (IS_GEN(dev_priv, 6)) {
+ engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz;
+ } else {
+ engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
+ }
} else {
engine->emit_flush = bsd_ring_flush;
- if (IS_GEN5(dev_priv))
+ if (IS_GEN(dev_priv, 5))
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
else
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
@@ -2332,11 +2291,21 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
+
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+ if (IS_GEN(dev_priv, 6)) {
+ engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen6_xcs_emit_breadcrumb_sz;
+ } else {
+ engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
+ }
+
return intel_init_ring_buffer(engine);
}
@@ -2344,6 +2313,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
+ GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
+
intel_ring_default_vfuncs(dev_priv, engine);
engine->emit_flush = gen6_ring_flush;
@@ -2351,5 +2322,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
engine->irq_enable = hsw_vebox_irq_enable;
engine->irq_disable = hsw_vebox_irq_disable;
+ engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
+ engine->emit_breadcrumb_sz = gen7_xcs_emit_breadcrumb_sz;
+
return intel_init_ring_buffer(engine);
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 72edaa7ff411..c3ef0f9bf321 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -28,7 +28,7 @@ struct i915_sched_attr;
* workarounds!
*/
#define CACHELINE_BYTES 64
-#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(uint32_t))
+#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
struct intel_hw_status_page {
struct i915_vma *vma;
@@ -94,12 +94,12 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
#define I915_MAX_SUBSLICES 8
#define instdone_slice_mask(dev_priv__) \
- (IS_GEN7(dev_priv__) ? \
- 1 : INTEL_INFO(dev_priv__)->sseu.slice_mask)
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN7(dev_priv__) ? \
- 1 : INTEL_INFO(dev_priv__)->sseu.subslice_mask[0])
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
for ((slice__) = 0, (subslice__) = 0; \
@@ -365,9 +365,6 @@ struct intel_engine_cs {
struct drm_i915_gem_object *default_state;
void *pinned_default_state;
- unsigned long irq_posted;
-#define ENGINE_IRQ_BREADCRUMB 0
-
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -401,7 +398,6 @@ struct intel_engine_cs {
unsigned int irq_count;
bool irq_armed : 1;
- I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
struct {
@@ -501,69 +497,8 @@ struct intel_engine_cs {
*/
void (*cancel_requests)(struct intel_engine_cs *engine);
- /* Some chipsets are not quite as coherent as advertised and need
- * an expensive kick to force a true read of the up-to-date seqno.
- * However, the up-to-date seqno is not always required and the last
- * seen value is good enough. Note that the seqno will always be
- * monotonic, even if not coherent.
- */
- void (*irq_seqno_barrier)(struct intel_engine_cs *engine);
void (*cleanup)(struct intel_engine_cs *engine);
- /* GEN8 signal/wait table - never trust comments!
- * signal to signal to signal to signal to signal to
- * RCS VCS BCS VECS VCS2
- * --------------------------------------------------------------------
- * RCS | NOP (0x00) | VCS (0x08) | BCS (0x10) | VECS (0x18) | VCS2 (0x20) |
- * |-------------------------------------------------------------------
- * VCS | RCS (0x28) | NOP (0x30) | BCS (0x38) | VECS (0x40) | VCS2 (0x48) |
- * |-------------------------------------------------------------------
- * BCS | RCS (0x50) | VCS (0x58) | NOP (0x60) | VECS (0x68) | VCS2 (0x70) |
- * |-------------------------------------------------------------------
- * VECS | RCS (0x78) | VCS (0x80) | BCS (0x88) | NOP (0x90) | VCS2 (0x98) |
- * |-------------------------------------------------------------------
- * VCS2 | RCS (0xa0) | VCS (0xa8) | BCS (0xb0) | VECS (0xb8) | NOP (0xc0) |
- * |-------------------------------------------------------------------
- *
- * Generalization:
- * f(x, y) := (x->id * NUM_RINGS * seqno_size) + (seqno_size * y->id)
- * ie. transpose of g(x, y)
- *
- * sync from sync from sync from sync from sync from
- * RCS VCS BCS VECS VCS2
- * --------------------------------------------------------------------
- * RCS | NOP (0x00) | VCS (0x28) | BCS (0x50) | VECS (0x78) | VCS2 (0xa0) |
- * |-------------------------------------------------------------------
- * VCS | RCS (0x08) | NOP (0x30) | BCS (0x58) | VECS (0x80) | VCS2 (0xa8) |
- * |-------------------------------------------------------------------
- * BCS | RCS (0x10) | VCS (0x38) | NOP (0x60) | VECS (0x88) | VCS2 (0xb0) |
- * |-------------------------------------------------------------------
- * VECS | RCS (0x18) | VCS (0x40) | BCS (0x68) | NOP (0x90) | VCS2 (0xb8) |
- * |-------------------------------------------------------------------
- * VCS2 | RCS (0x20) | VCS (0x48) | BCS (0x70) | VECS (0x98) | NOP (0xc0) |
- * |-------------------------------------------------------------------
- *
- * Generalization:
- * g(x, y) := (y->id * NUM_RINGS * seqno_size) + (seqno_size * x->id)
- * ie. transpose of f(x, y)
- */
- struct {
-#define GEN6_SEMAPHORE_LAST VECS_HW
-#define GEN6_NUM_SEMAPHORES (GEN6_SEMAPHORE_LAST + 1)
-#define GEN6_SEMAPHORES_MASK GENMASK(GEN6_SEMAPHORE_LAST, 0)
- struct {
- /* our mbox written by others */
- u32 wait[GEN6_NUM_SEMAPHORES];
- /* mboxes this ring signals to */
- i915_reg_t signal[GEN6_NUM_SEMAPHORES];
- } mbox;
-
- /* AKA wait() */
- int (*sync_to)(struct i915_request *rq,
- struct i915_request *signal);
- u32 *(*signal)(struct i915_request *rq, u32 *cs);
- } semaphore;
-
struct intel_engine_execlists execlists;
/* Contexts are pinned whilst they are active on the GPU. The last
@@ -808,7 +743,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
int __must_check intel_ring_cacheline_align(struct i915_request *rq);
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
@@ -889,7 +823,7 @@ intel_ring_set_tail(struct intel_ring *ring, unsigned int tail)
return tail;
}
-void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
+void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
void intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
@@ -903,6 +837,8 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine);
+void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
@@ -947,15 +883,6 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
-/*
- * Arbitrary size for largest possible 'add request' sequence. The code paths
- * are complex and variable. Empirical measurement shows that the worst case
- * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
- * we need to allocate double the largest single packet within that emission
- * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
- */
-#define MIN_SPACE_FOR_ADD_REQUEST 336
-
static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
{
return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
@@ -1055,7 +982,7 @@ static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
}
static inline u32 *
-gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
/* We're using qword write, offset should be aligned to 8 bytes. */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
@@ -1065,8 +992,7 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset)
* following the batch.
*/
*cs++ = GFX_OP_PIPE_CONTROL(6);
- *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE;
+ *cs++ = flags | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB;
*cs++ = gtt_offset;
*cs++ = 0;
*cs++ = value;
@@ -1092,7 +1018,7 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
return cs;
}
-void intel_engines_sanitize(struct drm_i915_private *i915);
+void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 4350a5270423..a017a4232c0f 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -29,6 +29,8 @@
#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
+#include <drm/drm_print.h>
+
#include "i915_drv.h"
#include "intel_drv.h"
@@ -49,6 +51,268 @@
* present for a given platform.
*/
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
+#include <linux/sort.h>
+
+#define STACKDEPTH 8
+
+static noinline depot_stack_handle_t __save_depot_stack(void)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ .skip = 1,
+ };
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries &&
+ trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
+}
+
+static void __print_depot_stack(depot_stack_handle_t stack,
+ char *buf, int sz, int indent)
+{
+ unsigned long entries[STACKDEPTH];
+ struct stack_trace trace = {
+ .entries = entries,
+ .max_entries = ARRAY_SIZE(entries),
+ };
+
+ depot_fetch_stack(stack, &trace);
+ snprint_stack_trace(buf, sz, &trace, indent);
+}
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+
+ spin_lock_init(&rpm->debug.lock);
+}
+
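+/*
+ * Record the caller's stack in the stack depot and remember the handle as
+ * the wakeref cookie; -1 marks an untracked (or allocation-failed) wakeref.
+ */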
+static noinline depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ depot_stack_handle_t stack, *stacks;
+ unsigned long flags;
+
+ atomic_inc(&rpm->wakeref_count);
+ assert_rpm_wakelock_held(i915);
+
+ if (!HAS_RUNTIME_PM(i915))
+ return -1;
+
+ stack = __save_depot_stack();
+ if (!stack)
+ return -1;
+
+ spin_lock_irqsave(&rpm->debug.lock, flags);
+
+ if (!rpm->debug.count)
+ rpm->debug.last_acquire = stack;
+
+ stacks = krealloc(rpm->debug.owners,
+ (rpm->debug.count + 1) * sizeof(*stacks),
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (stacks) {
+ stacks[rpm->debug.count++] = stack;
+ rpm->debug.owners = stacks;
+ } else {
+ stack = -1;
+ }
+
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ return stack;
+}
+
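+/*
+ * Drop the tracking entry recorded for @stack when its wakeref is released;
+ * warn and dump the offending stacks if the cookie was never tracked.
+ */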
+static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ depot_stack_handle_t stack)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ unsigned long flags, n;
+ bool found = false;
+
+ if (unlikely(stack == -1))
+ return;
+
+ spin_lock_irqsave(&rpm->debug.lock, flags);
+ for (n = rpm->debug.count; n--; ) {
+ if (rpm->debug.owners[n] == stack) {
+ memmove(rpm->debug.owners + n,
+ rpm->debug.owners + n + 1,
+ (--rpm->debug.count - n) * sizeof(stack));
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+
+ if (WARN(!found,
+ "Unmatched wakeref (tracking %lu), count %u\n",
+ rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
+
+ stack = READ_ONCE(rpm->debug.last_release);
+ if (stack) {
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
+ }
+
+ kfree(buf);
+ }
+}
+
+static int cmphandle(const void *_a, const void *_b)
+{
+ const depot_stack_handle_t * const a = _a, * const b = _b;
+
+ if (*a < *b)
+ return -1;
+ else if (*a > *b)
+ return 1;
+ else
+ return 0;
+}
+
+static void
+__print_intel_runtime_pm_wakeref(struct drm_printer *p,
+ const struct intel_runtime_pm_debug *dbg)
+{
+ unsigned long i;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ if (dbg->last_acquire) {
+ __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
+ drm_printf(p, "Wakeref last acquired:\n%s", buf);
+ }
+
+ if (dbg->last_release) {
+ __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
+ drm_printf(p, "Wakeref last released:\n%s", buf);
+ }
+
+ drm_printf(p, "Wakeref count: %lu\n", dbg->count);
+
+ sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
+
+ for (i = 0; i < dbg->count; i++) {
+ depot_stack_handle_t stack = dbg->owners[i];
+ unsigned long rep;
+
+ rep = 1;
+ while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
+ rep++, i++;
+ __print_depot_stack(stack, buf, PAGE_SIZE, 2);
+ drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
+ }
+
+ kfree(buf);
+}
+
+static noinline void
+untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ struct intel_runtime_pm_debug dbg = {};
+ struct drm_printer p;
+ unsigned long flags;
+
+ assert_rpm_wakelock_held(i915);
+ if (atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
+ &rpm->debug.lock,
+ flags)) {
+ dbg = rpm->debug;
+
+ rpm->debug.owners = NULL;
+ rpm->debug.count = 0;
+ rpm->debug.last_release = __save_depot_stack();
+
+ spin_unlock_irqrestore(&rpm->debug.lock, flags);
+ }
+ if (!dbg.count)
+ return;
+
+ p = drm_debug_printer("i915");
+ __print_intel_runtime_pm_wakeref(&p, &dbg);
+
+ kfree(dbg.owners);
+}
+
+void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
+ struct drm_printer *p)
+{
+ struct intel_runtime_pm_debug dbg = {};
+
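+ /*
+ * Snapshot the debug state under the lock, growing the copy buffer
+ * outside it and retrying whenever more wakerefs appear meanwhile.
+ */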
+ do {
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ unsigned long alloc = dbg.count;
+ depot_stack_handle_t *s;
+
+ spin_lock_irq(&rpm->debug.lock);
+ dbg.count = rpm->debug.count;
+ if (dbg.count <= alloc) {
+ memcpy(dbg.owners,
+ rpm->debug.owners,
+ dbg.count * sizeof(*s));
+ }
+ dbg.last_acquire = rpm->debug.last_acquire;
+ dbg.last_release = rpm->debug.last_release;
+ spin_unlock_irq(&rpm->debug.lock);
+ if (dbg.count <= alloc)
+ break;
+
+ s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+ if (!s)
+ goto out;
+
+ dbg.owners = s;
+ } while (1);
+
+ __print_intel_runtime_pm_wakeref(p, &dbg);
+
+out:
+ kfree(dbg.owners);
+}
+
+#else
+
+static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+}
+
+static depot_stack_handle_t
+track_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ atomic_inc(&i915->runtime_pm.wakeref_count);
+ assert_rpm_wakelock_held(i915);
+ return -1;
+}
+
+static void untrack_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
+{
+ assert_rpm_wakelock_held(i915);
+ atomic_dec(&i915->runtime_pm.wakeref_count);
+}
+
+#endif
+
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -509,7 +773,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
* BIOS's own request bits, which are forced-on for these power wells
* when exiting DC5/6.
*/
- if (IS_GEN9(dev_priv) && !IS_GEN9_LP(dev_priv) &&
+ if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
val |= I915_READ(regs->bios);
@@ -639,10 +903,10 @@ void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
* back on and register state is restored. This is guaranteed by the MMIO write
* to DC_STATE_EN blocking until the state is restored.
*/
-static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
+static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
{
- uint32_t val;
- uint32_t mask;
+ u32 val;
+ u32 mask;
if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
@@ -1274,7 +1538,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
enum pipe pipe;
- uint32_t tmp;
+ u32 tmp;
WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
@@ -1591,18 +1855,19 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
-void intel_display_power_get(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
-
- intel_runtime_pm_get(dev_priv);
+ intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv);
mutex_lock(&power_domains->lock);
__intel_display_power_get_domain(dev_priv, domain);
mutex_unlock(&power_domains->lock);
+
+ return wakeref;
}
/**
@@ -1617,13 +1882,16 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
* Any power domain reference obtained by this function must have a symmetric
* call to intel_display_power_put() to release the reference again.
*/
-bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+intel_wakeref_t
+intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ intel_wakeref_t wakeref;
bool is_enabled;
- if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ if (!wakeref)
return false;
mutex_lock(&power_domains->lock);
@@ -1637,23 +1905,16 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- if (!is_enabled)
- intel_runtime_pm_put(dev_priv);
+ if (!is_enabled) {
+ intel_runtime_pm_put(dev_priv, wakeref);
+ wakeref = 0;
+ }
- return is_enabled;
+ return wakeref;
}
-/**
- * intel_display_power_put - release a power domain reference
- * @dev_priv: i915 device instance
- * @domain: power domain to reference
- *
- * This function drops the power domain reference obtained by
- * intel_display_power_get() and might power down the corresponding hardware
- * block right away if this is the last reference.
- */
-void intel_display_power_put(struct drm_i915_private *dev_priv,
- enum intel_display_power_domain domain)
+static void __intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
@@ -1671,9 +1932,33 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_put_unchecked - release a power domain reference
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function drops the power domain reference obtained by
+ * intel_display_power_get() and might power down the corresponding hardware
+ * block right away if this is the last reference.
+ */
+void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put_unchecked(dev_priv);
+}
- intel_runtime_pm_put(dev_priv);
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_display_power_put(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
+{
+ __intel_display_power_put(dev_priv, domain);
+ intel_runtime_pm_put(dev_priv, wakeref);
}
+#endif
#define I830_PIPES_POWER_DOMAINS ( \
BIT_ULL(POWER_DOMAIN_PIPE_A) | \
@@ -3043,10 +3328,10 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv,
return 1;
}
-static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
- int enable_dc)
+static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
+ int enable_dc)
{
- uint32_t mask;
+ u32 mask;
int requested_dc;
int max_dc;
@@ -3058,7 +3343,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
* suspend/resume, so allow it unconditionally.
*/
mask = DC_STATE_EN_DC9;
- } else if (IS_GEN10(dev_priv) || IS_GEN9_BC(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -3311,7 +3596,7 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- uint32_t val;
+ u32 val;
val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
MBUS_ABOX_BT_CREDIT_POOL2(16) |
@@ -3622,7 +3907,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* current lane status.
*/
if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- uint32_t status = I915_READ(DPLL(PIPE_A));
+ u32 status = I915_READ(DPLL(PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
@@ -3653,7 +3938,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
}
if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- uint32_t status = I915_READ(DPIO_PHY_STATUS);
+ u32 status = I915_READ(DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -3712,7 +3997,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
* intel_power_domains_init_hw - initialize hardware power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
@@ -3726,30 +4011,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_fini_hw().
*/
-void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
+void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
power_domains->initializing = true;
- if (IS_ICELAKE(dev_priv)) {
- icl_display_core_init(dev_priv, resume);
- } else if (IS_CANNONLAKE(dev_priv)) {
- cnl_display_core_init(dev_priv, resume);
- } else if (IS_GEN9_BC(dev_priv)) {
- skl_display_core_init(dev_priv, resume);
- } else if (IS_GEN9_LP(dev_priv)) {
- bxt_display_core_init(dev_priv, resume);
- } else if (IS_CHERRYVIEW(dev_priv)) {
+ if (IS_ICELAKE(i915)) {
+ icl_display_core_init(i915, resume);
+ } else if (IS_CANNONLAKE(i915)) {
+ cnl_display_core_init(i915, resume);
+ } else if (IS_GEN9_BC(i915)) {
+ skl_display_core_init(i915, resume);
+ } else if (IS_GEN9_LP(i915)) {
+ bxt_display_core_init(i915, resume);
+ } else if (IS_CHERRYVIEW(i915)) {
mutex_lock(&power_domains->lock);
- chv_phy_control_init(dev_priv);
+ chv_phy_control_init(i915);
mutex_unlock(&power_domains->lock);
- } else if (IS_VALLEYVIEW(dev_priv)) {
+ } else if (IS_VALLEYVIEW(i915)) {
mutex_lock(&power_domains->lock);
- vlv_cmnlane_wa(dev_priv);
+ vlv_cmnlane_wa(i915);
mutex_unlock(&power_domains->lock);
- } else if (IS_IVYBRIDGE(dev_priv) || INTEL_GEN(dev_priv) >= 7)
- intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
+ } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
+ intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
+ }
/*
* Keep all power wells enabled for any dependent HW access during
@@ -3757,18 +4043,20 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
* resources powered until display HW readout is complete. We drop
* this reference in intel_power_domains_enable().
*/
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
/* Disable power support if the user asked so. */
if (!i915_modparams.disable_power_well)
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- intel_power_domains_sync_hw(dev_priv);
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+ intel_power_domains_sync_hw(i915);
power_domains->initializing = false;
}
/**
* intel_power_domains_fini_hw - deinitialize hw power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* De-initializes the display power domain HW state. It also ensures that the
* device stays powered up so that the driver can be reloaded.
@@ -3777,21 +4065,24 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
* intel_power_domains_disable()) and must be paired with
* intel_power_domains_init_hw().
*/
-void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
+void intel_power_domains_fini_hw(struct drm_i915_private *i915)
{
- /* Keep the power well enabled, but cancel its rpm wakeref. */
- intel_runtime_pm_put(dev_priv);
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
/* Remove the refcount we took to keep power well support disabled. */
if (!i915_modparams.disable_power_well)
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(i915);
- intel_power_domains_verify_state(dev_priv);
+ /* Keep the power well enabled, but cancel its rpm wakeref. */
+ intel_runtime_pm_put(i915, wakeref);
}
/**
* intel_power_domains_enable - enable toggling of display power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Enable the ondemand enabling/disabling of the display power wells. Note that
* power wells not belonging to POWER_DOMAIN_INIT are allowed to be toggled
@@ -3801,30 +4092,36 @@ void intel_power_domains_fini_hw(struct drm_i915_private *dev_priv)
* of display HW readout (which will acquire the power references reflecting
* the current HW state).
*/
-void intel_power_domains_enable(struct drm_i915_private *dev_priv)
+void intel_power_domains_enable(struct drm_i915_private *i915)
{
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&i915->power_domains.wakeref);
- intel_power_domains_verify_state(dev_priv);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
+ intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_disable - disable toggling of display power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Disable the ondemand enabling/disabling of the display power wells. See
* intel_power_domains_enable() for which power wells this call controls.
*/
-void intel_power_domains_disable(struct drm_i915_private *dev_priv)
+void intel_power_domains_disable(struct drm_i915_private *i915)
{
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ struct i915_power_domains *power_domains = &i915->power_domains;
- intel_power_domains_verify_state(dev_priv);
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
+
+ intel_power_domains_verify_state(i915);
}
/**
* intel_power_domains_suspend - suspend power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
* @suspend_mode: specifies the target suspend state (idle, mem, hibernation)
*
* This function prepares the hardware power domain state before entering
@@ -3833,12 +4130,14 @@ void intel_power_domains_disable(struct drm_i915_private *dev_priv)
* It must be called with power domains already disabled (after a call to
* intel_power_domains_disable()) and paired with intel_power_domains_resume().
*/
-void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
+void intel_power_domains_suspend(struct drm_i915_private *i915,
enum i915_drm_suspend_mode suspend_mode)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
+ intel_wakeref_t wakeref __maybe_unused =
+ fetch_and_zero(&power_domains->wakeref);
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+ intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref);
/*
* In case of suspend-to-idle (aka S0ix) on a DMC platform without DC9
@@ -3847,10 +4146,10 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* resources as required and also enable deeper system power states
* that would be blocked if the firmware was inactive.
*/
- if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
+ if (!(i915->csr.allowed_dc_mask & DC_STATE_EN_DC9) &&
suspend_mode == I915_DRM_SUSPEND_IDLE &&
- dev_priv->csr.dmc_payload != NULL) {
- intel_power_domains_verify_state(dev_priv);
+ i915->csr.dmc_payload) {
+ intel_power_domains_verify_state(i915);
return;
}
@@ -3859,25 +4158,25 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* power wells if power domains must be deinitialized for suspend.
*/
if (!i915_modparams.disable_power_well) {
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- intel_power_domains_verify_state(dev_priv);
+ intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT);
+ intel_power_domains_verify_state(i915);
}
- if (IS_ICELAKE(dev_priv))
- icl_display_core_uninit(dev_priv);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_display_core_uninit(dev_priv);
- else if (IS_GEN9_BC(dev_priv))
- skl_display_core_uninit(dev_priv);
- else if (IS_GEN9_LP(dev_priv))
- bxt_display_core_uninit(dev_priv);
+ if (IS_ICELAKE(i915))
+ icl_display_core_uninit(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_display_core_uninit(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_display_core_uninit(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_display_core_uninit(i915);
power_domains->display_core_suspended = true;
}
/**
* intel_power_domains_resume - resume power domain state
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
 * This function resumes the hardware power domain state during system resume.
*
@@ -3885,28 +4184,30 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
* intel_power_domains_enable()) and must be paired with
* intel_power_domains_suspend().
*/
-void intel_power_domains_resume(struct drm_i915_private *dev_priv)
+void intel_power_domains_resume(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
if (power_domains->display_core_suspended) {
- intel_power_domains_init_hw(dev_priv, true);
+ intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ WARN_ON(power_domains->wakeref);
+ power_domains->wakeref =
+ intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
- intel_power_domains_verify_state(dev_priv);
+ intel_power_domains_verify_state(i915);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
+static void intel_power_domains_dump_info(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
struct i915_power_well *power_well;
- for_each_power_well(dev_priv, power_well) {
+ for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
DRM_DEBUG_DRIVER("%-25s %d\n",
@@ -3921,7 +4222,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
/**
* intel_power_domains_verify_state - verify the HW/SW state for all power wells
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* Verify if the reference count of each power well matches its HW enabled
* state and the total refcount of the domains it belongs to. This must be
@@ -3929,22 +4230,21 @@ static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
* acquiring reference counts for any power wells in use and disabling the
* ones left on by BIOS but not required by any active output.
*/
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_domains *power_domains = &i915->power_domains;
struct i915_power_well *power_well;
bool dump_domain_info;
mutex_lock(&power_domains->lock);
dump_domain_info = false;
- for_each_power_well(dev_priv, power_well) {
+ for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
int domains_count;
bool enabled;
- enabled = power_well->desc->ops->is_enabled(dev_priv,
- power_well);
+ enabled = power_well->desc->ops->is_enabled(i915, power_well);
if ((power_well->count || power_well->desc->always_on) !=
enabled)
DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
@@ -3968,7 +4268,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
static bool dumped;
if (!dumped) {
- intel_power_domains_dump_info(dev_priv);
+ intel_power_domains_dump_info(i915);
dumped = true;
}
}
@@ -3978,7 +4278,7 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
#else
-static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+static void intel_power_domains_verify_state(struct drm_i915_private *i915)
{
}
@@ -3986,30 +4286,31 @@ static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
/**
* intel_runtime_pm_get - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on) and ensures that it is powered up.
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
int ret;
ret = pm_runtime_get_sync(kdev);
WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
- assert_rpm_wakelock_held(dev_priv);
+ return track_intel_runtime_pm_wakeref(i915);
}
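A minimal caller sketch of the cookie-based interface documented above (the wrapper function is illustrative, not part of the patch):

	static void example_use_hw(struct drm_i915_private *i915)
	{
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get(i915);
		/* ... touch hardware while the device is guaranteed awake ... */
		intel_runtime_pm_put(i915, wakeref);
	}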
/**
* intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference if the device is
* already in use and ensures that it is powered up. It is illegal to try
@@ -4018,12 +4319,13 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
*
- * Returns: True if the wakeref was acquired, or False otherwise.
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
+ * as True if the wakeref was acquired, or False otherwise.
*/
-bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915)
{
if (IS_ENABLED(CONFIG_PM)) {
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/*
@@ -4033,18 +4335,15 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
* atm to the late/early system suspend/resume handlers.
*/
if (pm_runtime_get_if_in_use(kdev) <= 0)
- return false;
+ return 0;
}
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
- assert_rpm_wakelock_held(dev_priv);
-
- return true;
+ return track_intel_runtime_pm_wakeref(i915);
}
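Because the returned cookie evaluates as false when no reference was taken, callers can test it directly; a sketch under the same assumption that the wrapper is illustrative:

	static void example_peek_hw(struct drm_i915_private *i915)
	{
		intel_wakeref_t wakeref;

		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref)
			return; /* device already suspended, skip the access */

		/* ... read registers ... */

		intel_runtime_pm_put(i915, wakeref);
	}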
/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function grabs a device-level runtime pm reference (mostly used for GEM
* code to ensure the GTT or GT is on).
@@ -4058,41 +4357,50 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
*
* Any runtime pm reference obtained by this function must have a symmetric
* call to intel_runtime_pm_put() to release the reference again.
+ *
+ * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
*/
-void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
+intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
- assert_rpm_wakelock_held(dev_priv);
+ assert_rpm_wakelock_held(i915);
pm_runtime_get_noresume(kdev);
- atomic_inc(&dev_priv->runtime_pm.wakeref_count);
+ return track_intel_runtime_pm_wakeref(i915);
}
/**
* intel_runtime_pm_put - release a runtime pm reference
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function drops the device-level runtime pm reference obtained by
* intel_runtime_pm_get() and might power down the corresponding
* hardware block right away if this is the last reference.
*/
-void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
- assert_rpm_wakelock_held(dev_priv);
- atomic_dec(&dev_priv->runtime_pm.wakeref_count);
+ untrack_intel_runtime_pm_wakeref(i915);
pm_runtime_mark_last_busy(kdev);
pm_runtime_put_autosuspend(kdev);
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
+{
+ cancel_intel_runtime_pm_wakeref(i915, wref);
+ intel_runtime_pm_put_unchecked(i915);
+}
+#endif
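When CONFIG_DRM_I915_DEBUG_RUNTIME_PM is disabled, the checked put above is presumably supplied by the header as a trivial wrapper around the unchecked variant, roughly along these lines (assumption, not quoted from this patch):

	static inline void
	intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref)
	{
		intel_runtime_pm_put_unchecked(i915);
	}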
+
/**
* intel_runtime_pm_enable - enable runtime pm
- * @dev_priv: i915 device instance
+ * @i915: i915 device instance
*
* This function enables runtime pm at the end of the driver load sequence.
*
@@ -4100,9 +4408,9 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
* subordinate display power domains. That is done by
* intel_power_domains_enable().
*/
-void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_enable(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/*
@@ -4124,7 +4432,7 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* so the driver's own RPM reference tracking asserts also work on
* platforms without RPM support.
*/
- if (!HAS_RUNTIME_PM(dev_priv)) {
+ if (!HAS_RUNTIME_PM(i915)) {
int ret;
pm_runtime_dont_use_autosuspend(kdev);
@@ -4142,17 +4450,35 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
pm_runtime_put_autosuspend(kdev);
}
-void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
+void intel_runtime_pm_disable(struct drm_i915_private *i915)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
/* Transfer rpm ownership back to core */
- WARN(pm_runtime_get_sync(&dev_priv->drm.pdev->dev) < 0,
+ WARN(pm_runtime_get_sync(kdev) < 0,
"Failed to pass rpm ownership back to core\n");
pm_runtime_dont_use_autosuspend(kdev);
- if (!HAS_RUNTIME_PM(dev_priv))
+ if (!HAS_RUNTIME_PM(i915))
pm_runtime_put(kdev);
}
+
+void intel_runtime_pm_cleanup(struct drm_i915_private *i915)
+{
+ struct i915_runtime_pm *rpm = &i915->runtime_pm;
+ int count;
+
+ count = atomic_fetch_inc(&rpm->wakeref_count); /* balance untrack */
+ WARN(count,
+ "i915->runtime_pm.wakeref_count=%d on cleanup\n",
+ count);
+
+ untrack_intel_runtime_pm_wakeref(i915);
+}
+
+void intel_runtime_pm_init_early(struct drm_i915_private *i915)
+{
+ init_intel_runtime_pm_wakeref(i915);
+}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 4a03b7f67dd5..df2d830a7405 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -29,7 +29,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/export.h>
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d2e003d8f3db..b02d3d9809e3 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -29,7 +29,6 @@
* registers; newer ones are much simpler and we can use the new DRM plane
* support.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
@@ -322,8 +321,8 @@ skl_program_scaler(struct intel_plane *plane,
&crtc_state->scaler_state.scalers[scaler_id];
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
u16 y_hphase, uv_rgb_hphase;
u16 y_vphase, uv_rgb_vphase;
int hscale, vscale;
@@ -478,10 +477,10 @@ skl_program_plane(struct intel_plane *plane,
u32 aux_stride = skl_plane_stride(plane_state, 1);
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t x = plane_state->color_plane[color_plane].x;
- uint32_t y = plane_state->color_plane[color_plane].y;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 x = plane_state->color_plane[color_plane].x;
+ u32 y = plane_state->color_plane[color_plane].y;
+ u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
struct intel_plane *linked = plane_state->linked_plane;
const struct drm_framebuffer *fb = plane_state->base.fb;
u8 alpha = plane_state->base.alpha >> 8;
@@ -619,17 +618,19 @@ skl_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -813,10 +814,10 @@ vlv_update_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 x = plane_state->color_plane[0].x;
+ u32 y = plane_state->color_plane[0].y;
unsigned long irqflags;
/* Sizes are 0 based */
@@ -883,17 +884,19 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
enum plane_id plane_id = plane->id;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -973,12 +976,12 @@ ivb_update_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 x = plane_state->color_plane[0].x;
+ u32 y = plane_state->color_plane[0].y;
+ u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
/* Sizes are 0 based */
@@ -1052,17 +1055,19 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -1087,7 +1092,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;
- if (IS_GEN6(dev_priv))
+ if (IS_GEN(dev_priv, 6))
dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
@@ -1147,12 +1152,12 @@ g4x_update_plane(struct intel_plane *plane,
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
int crtc_x = plane_state->base.dst.x1;
int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->color_plane[0].x;
- uint32_t y = plane_state->color_plane[0].y;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ u32 crtc_w = drm_rect_width(&plane_state->base.dst);
+ u32 crtc_h = drm_rect_height(&plane_state->base.dst);
+ u32 x = plane_state->color_plane[0].x;
+ u32 y = plane_state->color_plane[0].y;
+ u32 src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ u32 src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
/* Sizes are 0 based */
@@ -1218,17 +1223,19 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
bool ret;
power_domain = POWER_DOMAIN_PIPE(plane->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return false;
ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
}
@@ -1699,7 +1706,7 @@ out:
return ret;
}
-static const uint32_t g4x_plane_formats[] = {
+static const u32 g4x_plane_formats[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
@@ -1707,13 +1714,13 @@ static const uint32_t g4x_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint64_t i9xx_plane_format_modifiers[] = {
+static const u64 i9xx_plane_format_modifiers[] = {
I915_FORMAT_MOD_X_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
-static const uint32_t snb_plane_formats[] = {
+static const u32 snb_plane_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_YUYV,
@@ -1722,7 +1729,7 @@ static const uint32_t snb_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint32_t vlv_plane_formats[] = {
+static const u32 vlv_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB8888,
@@ -1736,7 +1743,7 @@ static const uint32_t vlv_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint32_t skl_plane_formats[] = {
+static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1751,7 +1758,7 @@ static const uint32_t skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
-static const uint32_t skl_planar_formats[] = {
+static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -1767,7 +1774,7 @@ static const uint32_t skl_planar_formats[] = {
DRM_FORMAT_NV12,
};
-static const uint64_t skl_plane_format_modifiers_noccs[] = {
+static const u64 skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
I915_FORMAT_MOD_X_TILED,
@@ -1775,7 +1782,7 @@ static const uint64_t skl_plane_format_modifiers_noccs[] = {
DRM_FORMAT_MOD_INVALID
};
-static const uint64_t skl_plane_format_modifiers_ccs[] = {
+static const u64 skl_plane_format_modifiers_ccs[] = {
I915_FORMAT_MOD_Yf_TILED_CCS,
I915_FORMAT_MOD_Y_TILED_CCS,
I915_FORMAT_MOD_Yf_TILED,
@@ -1983,7 +1990,7 @@ static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))
return false;
- if (IS_GEN9(dev_priv) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
+ if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv) && pipe == PIPE_C)
return false;
if (plane_id != PLANE_PRIMARY && plane_id != PLANE_SPRITE0)
@@ -2163,7 +2170,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->check_plane = g4x_sprite_check;
modifiers = i9xx_plane_format_modifiers;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
formats = snb_plane_formats;
num_formats = ARRAY_SIZE(snb_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9bbe35a0f0f2..bd5536f0ec92 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -30,7 +30,6 @@
* Integrated TV-out support for the 915GM and 945GM.
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index b34c318b238d..e711eb3268bc 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -26,6 +26,7 @@
#include "intel_guc_submission.h"
#include "intel_guc.h"
#include "i915_drv.h"
+#include "i915_reset.h"
static void guc_free_load_err_log(struct intel_guc *guc);
@@ -71,7 +72,7 @@ static int __get_default_guc_log_level(struct drm_i915_private *i915)
{
int guc_log_level;
- if (!HAS_GUC(i915) || !intel_uc_is_using_guc())
+ if (!HAS_GUC(i915) || !intel_uc_is_using_guc(i915))
guc_log_level = GUC_LOG_LEVEL_DISABLED;
else if (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
@@ -112,11 +113,11 @@ static void sanitize_options_early(struct drm_i915_private *i915)
DRM_DEBUG_DRIVER("enable_guc=%d (submission:%s huc:%s)\n",
i915_modparams.enable_guc,
- yesno(intel_uc_is_using_guc_submission()),
- yesno(intel_uc_is_using_huc()));
+ yesno(intel_uc_is_using_guc_submission(i915)),
+ yesno(intel_uc_is_using_huc(i915)));
/* Verify GuC firmware availability */
- if (intel_uc_is_using_guc() && !intel_uc_fw_is_selected(guc_fw)) {
+ if (intel_uc_is_using_guc(i915) && !intel_uc_fw_is_selected(guc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
!HAS_GUC(i915) ? "no GuC hardware" :
@@ -124,7 +125,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
}
/* Verify HuC firmware availability */
- if (intel_uc_is_using_huc() && !intel_uc_fw_is_selected(huc_fw)) {
+ if (intel_uc_is_using_huc(i915) && !intel_uc_fw_is_selected(huc_fw)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"enable_guc", i915_modparams.enable_guc,
!HAS_HUC(i915) ? "no HuC hardware" :
@@ -136,7 +137,7 @@ static void sanitize_options_early(struct drm_i915_private *i915)
i915_modparams.guc_log_level =
__get_default_guc_log_level(i915);
- if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc()) {
+ if (i915_modparams.guc_log_level > 0 && !intel_uc_is_using_guc(i915)) {
DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
"guc_log_level", i915_modparams.guc_log_level,
!HAS_GUC(i915) ? "no GuC hardware" :
@@ -354,7 +355,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
/* WaEnableuKernelHeaderValidFix:skl */
/* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
- if (IS_GEN9(i915))
+ if (IS_GEN(i915, 9))
attempts = 3;
else
attempts = 1;
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 25d73ada74ae..870faf9011b9 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -41,19 +41,19 @@ void intel_uc_fini(struct drm_i915_private *dev_priv);
int intel_uc_suspend(struct drm_i915_private *dev_priv);
int intel_uc_resume(struct drm_i915_private *dev_priv);
-static inline bool intel_uc_is_using_guc(void)
+static inline bool intel_uc_is_using_guc(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc > 0;
}
-static inline bool intel_uc_is_using_guc_submission(void)
+static inline bool intel_uc_is_using_guc_submission(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
}
-static inline bool intel_uc_is_using_huc(void)
+static inline bool intel_uc_is_using_huc(struct drm_i915_private *i915)
{
GEM_BUG_ON(i915_modparams.enable_guc < 0);
return i915_modparams.enable_guc & ENABLE_GUC_LOAD_HUC;
diff --git a/drivers/gpu/drm/i915/intel_uc_fw.c b/drivers/gpu/drm/i915/intel_uc_fw.c
index fd496416087c..becf05ebae4d 100644
--- a/drivers/gpu/drm/i915/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/intel_uc_fw.c
@@ -46,12 +46,17 @@ void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
size_t size;
int err;
+ if (!uc_fw->path) {
+ dev_info(dev_priv->drm.dev,
+ "%s: No firmware was defined for %s!\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ intel_platform_name(INTEL_INFO(dev_priv)->platform));
+ return;
+ }
+
DRM_DEBUG_DRIVER("%s fw fetch %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
- if (!uc_fw->path)
- return;
-
uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("%s fw fetch %s\n",
intel_uc_fw_type_repr(uc_fw->type),
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 9289515108c3..e88f0252d77e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -528,7 +528,7 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret |= vlv_check_for_unclaimed_mmio(dev_priv);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
ret |= gen6_check_for_fifo_debug(dev_priv);
return ret;
@@ -556,7 +556,7 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
dev_priv->uncore.funcs.force_wake_get(dev_priv,
restore_forcewake);
- if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
+ if (IS_GEN_RANGE(dev_priv, 6, 7))
dev_priv->uncore.fifo_count =
fifo_free_entries(dev_priv);
spin_unlock_irq(&dev_priv->uncore.lock);
@@ -1398,7 +1398,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
return;
- if (IS_GEN6(dev_priv)) {
+ if (IS_GEN(dev_priv, 6)) {
dev_priv->uncore.fw_reset = 0;
dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
dev_priv->uncore.fw_clear = 0;
@@ -1437,7 +1437,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN10(dev_priv) || IS_GEN9(dev_priv)) {
+ } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_fallback;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1503,7 +1503,7 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN6(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 6)) {
dev_priv->uncore.funcs.force_wake_get =
fw_domains_get_with_thread_status;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1567,13 +1567,13 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.pmic_bus_access_nb.notifier_call =
i915_pmic_bus_access_notifier;
- if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
+ if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
- } else if (IS_GEN5(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 5)) {
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
- } else if (IS_GEN(dev_priv, 6, 7)) {
+ } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
if (IS_VALLEYVIEW(dev_priv)) {
@@ -1582,7 +1582,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
} else {
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
- } else if (IS_GEN8(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 8)) {
if (IS_CHERRYVIEW(dev_priv)) {
ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1592,7 +1592,7 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
}
- } else if (IS_GEN(dev_priv, 9, 10)) {
+ } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
@@ -1670,6 +1670,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_reg_read *reg = data;
struct reg_whitelist const *entry;
+ intel_wakeref_t wakeref;
unsigned int flags;
int remain;
int ret = 0;
@@ -1695,286 +1696,25 @@ int i915_reg_read_ioctl(struct drm_device *dev,
flags = reg->offset & (entry->size - 1);
- intel_runtime_pm_get(dev_priv);
- if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
- reg->val = I915_READ64_2x32(entry->offset_ldw,
- entry->offset_udw);
- else if (entry->size == 8 && flags == 0)
- reg->val = I915_READ64(entry->offset_ldw);
- else if (entry->size == 4 && flags == 0)
- reg->val = I915_READ(entry->offset_ldw);
- else if (entry->size == 2 && flags == 0)
- reg->val = I915_READ16(entry->offset_ldw);
- else if (entry->size == 1 && flags == 0)
- reg->val = I915_READ8(entry->offset_ldw);
- else
- ret = -EINVAL;
- intel_runtime_pm_put(dev_priv);
-
- return ret;
-}
-
-static void gen3_stop_engine(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- const u32 base = engine->mmio_base;
-
- if (intel_engine_stop_cs(engine))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
-
- I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
- POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
-
- I915_WRITE_FW(RING_HEAD(base), 0);
- I915_WRITE_FW(RING_TAIL(base), 0);
- POSTING_READ_FW(RING_TAIL(base));
-
- /* The ring must be empty before it is disabled */
- I915_WRITE_FW(RING_CTL(base), 0);
-
- /* Check acts as a post */
- if (I915_READ_FW(RING_HEAD(base)) != 0)
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
- engine->name);
-}
-
-static void i915_stop_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (INTEL_GEN(dev_priv) < 3)
- return;
-
- for_each_engine_masked(engine, dev_priv, engine_mask, id)
- gen3_stop_engine(engine);
-}
-
-static bool i915_in_reset(struct pci_dev *pdev)
-{
- u8 gdrst;
-
- pci_read_config_byte(pdev, I915_GDRST, &gdrst);
- return gdrst & GRDOM_RESET_STATUS;
-}
-
-static int i915_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int err;
-
- /* Assert reset for at least 20 usec, and wait for acknowledgement. */
- pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- usleep_range(50, 200);
- err = wait_for(i915_in_reset(pdev), 500);
-
- /* Clear the reset request. */
- pci_write_config_byte(pdev, I915_GDRST, 0);
- usleep_range(50, 200);
- if (!err)
- err = wait_for(!i915_in_reset(pdev), 500);
-
- return err;
-}
-
-static bool g4x_reset_complete(struct pci_dev *pdev)
-{
- u8 gdrst;
-
- pci_read_config_byte(pdev, I915_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int g33_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
- return wait_for(g4x_reset_complete(pdev), 500);
-}
-
-static int g4x_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int ret;
-
- /* WaVcpClkGateDisableForMediaReset:ctg,elk */
- I915_WRITE(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ(VDECCLK_GATE_D);
-
- pci_write_config_byte(pdev, I915_GDRST,
- GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(pdev), 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
- goto out;
- }
-
- pci_write_config_byte(pdev, I915_GDRST,
- GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(g4x_reset_complete(pdev), 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
- goto out;
- }
-
-out:
- pci_write_config_byte(pdev, I915_GDRST, 0);
-
- I915_WRITE(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ(VDECCLK_GATE_D);
-
- return ret;
-}
-
-static int ironlake_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- int ret;
-
- I915_WRITE(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
- ret = intel_wait_for_register(dev_priv,
- ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
- 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
- goto out;
- }
-
- I915_WRITE(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
- ret = intel_wait_for_register(dev_priv,
- ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
- 500);
- if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
- goto out;
+ with_intel_runtime_pm(dev_priv, wakeref) {
+ if (entry->size == 8 && flags == I915_REG_READ_8B_WA)
+ reg->val = I915_READ64_2x32(entry->offset_ldw,
+ entry->offset_udw);
+ else if (entry->size == 8 && flags == 0)
+ reg->val = I915_READ64(entry->offset_ldw);
+ else if (entry->size == 4 && flags == 0)
+ reg->val = I915_READ(entry->offset_ldw);
+ else if (entry->size == 2 && flags == 0)
+ reg->val = I915_READ16(entry->offset_ldw);
+ else if (entry->size == 1 && flags == 0)
+ reg->val = I915_READ8(entry->offset_ldw);
+ else
+ ret = -EINVAL;
}
-out:
- I915_WRITE(ILK_GDSR, 0);
- POSTING_READ(ILK_GDSR);
return ret;
}
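The ioctl now scopes its wakeref with the with_intel_runtime_pm() helper instead of an explicit get/put pair. Such a scoped helper is typically a for-loop macro of this shape (sketch, not quoted from the patch):

	#define with_intel_runtime_pm(i915, wf) \
		for ((wf) = intel_runtime_pm_get(i915); (wf); \
		     intel_runtime_pm_put((i915), (wf)), (wf) = 0)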
-/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
- u32 hw_domain_mask)
-{
- int err;
-
- /* GEN6_GDRST is not in the gt power well, no need to check
- * for fifo space for the write or forcewake the chip for
- * the read
- */
- __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
-
- /* Wait for the device to ack the reset requests */
- err = __intel_wait_for_register_fw(dev_priv,
- GEN6_GDRST, hw_domain_mask, 0,
- 500, 0,
- NULL);
- if (err)
- DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
-
- return err;
-}
-
-/**
- * gen6_reset_engines - reset individual engines
- * @dev_priv: i915 device
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
- * @retry: the count of of previous attempts to reset.
- *
- * This function will reset the individual engines that are set in engine_mask.
- * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
- *
- * Note: It is responsibility of the caller to handle the difference between
- * asking full domain reset versus reset for all available individual engines.
- *
- * Returns 0 on success, nonzero on error.
- */
-static int gen6_reset_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN6_GRDOM_RENDER,
- [BCS] = GEN6_GRDOM_BLT,
- [VCS] = GEN6_GRDOM_MEDIA,
- [VCS2] = GEN8_GRDOM_MEDIA2,
- [VECS] = GEN6_GRDOM_VECS,
- };
- u32 hw_mask;
-
- if (engine_mask == ALL_ENGINES) {
- hw_mask = GEN6_GRDOM_FULL;
- } else {
- unsigned int tmp;
-
- hw_mask = 0;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- hw_mask |= hw_engine_mask[engine->id];
- }
-
- return gen6_hw_domain_reset(dev_priv, hw_mask);
-}
-
-/**
- * gen11_reset_engines - reset individual engines
- * @dev_priv: i915 device
- * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
- *
- * This function will reset the individual engines that are set in engine_mask.
- * If you provide ALL_ENGINES as mask, full global domain reset will be issued.
- *
- * Note: It is responsibility of the caller to handle the difference between
- * asking full domain reset versus reset for all available individual engines.
- *
- * Returns 0 on success, nonzero on error.
- */
-static int gen11_reset_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask)
-{
- struct intel_engine_cs *engine;
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN11_GRDOM_RENDER,
- [BCS] = GEN11_GRDOM_BLT,
- [VCS] = GEN11_GRDOM_MEDIA,
- [VCS2] = GEN11_GRDOM_MEDIA2,
- [VCS3] = GEN11_GRDOM_MEDIA3,
- [VCS4] = GEN11_GRDOM_MEDIA4,
- [VECS] = GEN11_GRDOM_VECS,
- [VECS2] = GEN11_GRDOM_VECS2,
- };
- u32 hw_mask;
-
- BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
-
- if (engine_mask == ALL_ENGINES) {
- hw_mask = GEN11_GRDOM_FULL;
- } else {
- unsigned int tmp;
-
- hw_mask = 0;
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- hw_mask |= hw_engine_mask[engine->id];
- }
-
- return gen6_hw_domain_reset(dev_priv, hw_mask);
-}
-
/**
* __intel_wait_for_register_fw - wait until register matches expected state
* @dev_priv: the i915 device
@@ -2085,196 +1825,6 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
return ret;
}
-static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
-
- ret = __intel_wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700, 0,
- NULL);
- if (ret)
- DRM_ERROR("%s: reset request timeout\n", engine->name);
-
- return ret;
-}
-
-static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
-}
-
-static int reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
- unsigned int retry)
-{
- if (INTEL_GEN(i915) >= 11)
- return gen11_reset_engines(i915, engine_mask);
- else
- return gen6_reset_engines(i915, engine_mask, retry);
-}
-
-static int gen8_reset_engines(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
- unsigned int retry)
-{
- struct intel_engine_cs *engine;
- const bool reset_non_ready = retry >= 1;
- unsigned int tmp;
- int ret;
-
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
- ret = gen8_engine_reset_prepare(engine);
- if (ret && !reset_non_ready)
- goto skip_reset;
-
- /*
- * If this is not the first failed attempt to prepare,
- * we decide to proceed anyway.
- *
- * By doing so we risk context corruption and with
- * some gens (kbl), possible system hang if reset
- * happens during active bb execution.
- *
- * We rather take context corruption instead of
- * failed reset with a wedged driver/gpu. And
- * active bb execution case should be covered by
- * i915_stop_engines we have before the reset.
- */
- }
-
- ret = reset_engines(dev_priv, engine_mask, retry);
-
-skip_reset:
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
- gen8_engine_reset_cancel(engine);
-
- return ret;
-}
-
-typedef int (*reset_func)(struct drm_i915_private *,
- unsigned int engine_mask, unsigned int retry);
-
-static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
-{
- if (!i915_modparams.reset)
- return NULL;
-
- if (INTEL_GEN(dev_priv) >= 8)
- return gen8_reset_engines;
- else if (INTEL_GEN(dev_priv) >= 6)
- return gen6_reset_engines;
- else if (IS_GEN5(dev_priv))
- return ironlake_do_reset;
- else if (IS_G4X(dev_priv))
- return g4x_do_reset;
- else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
- return g33_do_reset;
- else if (INTEL_GEN(dev_priv) >= 3)
- return i915_do_reset;
- else
- return NULL;
-}
-
-int intel_gpu_reset(struct drm_i915_private *dev_priv,
- const unsigned int engine_mask)
-{
- reset_func reset = intel_get_gpu_reset(dev_priv);
- unsigned int retry;
- int ret;
-
- GEM_BUG_ON(!engine_mask);
-
- /*
- * We want to perform per-engine reset from atomic context (e.g.
- * softirq), which imposes the constraint that we cannot sleep.
- * However, experience suggests that spending a bit of time waiting
- * for a reset helps in various cases, so for a full-device reset
- * we apply the opposite rule and wait if we want to. As we should
- * always follow up a failed per-engine reset with a full device reset,
- * being a little faster, stricter and more error prone for the
- * atomic case seems an acceptable compromise.
- *
- * Unfortunately this leads to a bimodal routine, when the goal was
- * to have a single reset function that worked for resetting any
- * number of engines simultaneously.
- */
- might_sleep_if(engine_mask == ALL_ENGINES);
-
- /*
- * If the power well sleeps during the reset, the reset
- * request may be dropped and never completes (causing -EIO).
- */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- for (retry = 0; retry < 3; retry++) {
-
- /*
- * We stop engines, otherwise we might get failed reset and a
- * dead gpu (on elk). Also as modern gpu as kbl can suffer
- * from system hang if batchbuffer is progressing when
- * the reset is issued, regardless of READY_TO_RESET ack.
- * Thus assume it is best to stop engines on all gens
- * where we have a gpu reset.
- *
- * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
- *
- * WaMediaResetMainRingCleanup:ctg,elk (presumably)
- *
- * FIXME: Wa for more modern gens needs to be validated
- */
- i915_stop_engines(dev_priv, engine_mask);
-
- ret = -ENODEV;
- if (reset) {
- ret = reset(dev_priv, engine_mask, retry);
- GEM_TRACE("engine_mask=%x, ret=%d, retry=%d\n",
- engine_mask, ret, retry);
- }
- if (ret != -ETIMEDOUT || engine_mask != ALL_ENGINES)
- break;
-
- cond_resched();
- }
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
- return ret;
-}
-
-bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
-{
- return intel_get_gpu_reset(dev_priv) != NULL;
-}
-
-bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
-{
- return (dev_priv->info.has_reset_engine &&
- i915_modparams.reset >= 2);
-}
-
-int intel_reset_guc(struct drm_i915_private *dev_priv)
-{
- u32 guc_domain = INTEL_GEN(dev_priv) >= 11 ? GEN11_GRDOM_GUC :
- GEN9_GRDOM_GUC;
- int ret;
-
- GEM_BUG_ON(!HAS_GUC(dev_priv));
-
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = gen6_hw_domain_reset(dev_priv, guc_domain);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-
- return ret;
-}
-
bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
{
return check_for_unclaimed_mmio(dev_priv);
@@ -2321,7 +1871,7 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
} else if (INTEL_GEN(dev_priv) >= 6) {
fw_domains = __gen6_reg_read_fw_domains(offset);
} else {
- WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
fw_domains = 0;
}
@@ -2341,12 +1891,12 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
fw_domains = __fwtable_reg_write_fw_domains(offset);
- } else if (IS_GEN8(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 8)) {
fw_domains = __gen8_reg_write_fw_domains(offset);
- } else if (IS_GEN(dev_priv, 6, 7)) {
+ } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
} else {
- WARN_ON(!IS_GEN(dev_priv, 2, 5));
+ WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
fw_domains = 0;
}
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index c56ba0e04044..23abf03736e7 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -6,7 +6,6 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
-#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
@@ -1083,6 +1082,6 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
/* Disable Power wells for VDSC/joining */
- intel_display_power_put(dev_priv,
- intel_dsc_power_domain(old_crtc_state));
+ intel_display_power_put_unchecked(dev_priv,
+ intel_dsc_power_domain(old_crtc_state));
}
diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c
index 92cb82dd0c07..f82a415ea2ba 100644
--- a/drivers/gpu/drm/i915/intel_wopcm.c
+++ b/drivers/gpu/drm/i915/intel_wopcm.c
@@ -130,11 +130,11 @@ static inline int check_hw_restriction(struct drm_i915_private *i915,
{
int err = 0;
- if (IS_GEN9(i915))
+ if (IS_GEN(i915, 9))
err = gen9_check_dword_gap(guc_wopcm_base, guc_wopcm_size);
if (!err &&
- (IS_GEN9(i915) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
+ (IS_GEN(i915, 9) || IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)))
err = gen9_check_huc_fw_fits(guc_wopcm_size, huc_fw_size);
return err;
@@ -163,7 +163,7 @@ int intel_wopcm_init(struct intel_wopcm *wopcm)
u32 guc_wopcm_rsvd;
int err;
- if (!USES_GUC(dev_priv))
+ if (!USES_GUC(i915))
return 0;
GEM_BUG_ON(!wopcm->size);
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 4f41e326f3f3..3210ad4e08f7 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -366,7 +366,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
* Only consider slices where one, and only one, subslice has 7
* EUs
*/
- if (!is_power_of_2(INTEL_INFO(i915)->sseu.subslice_7eu[i]))
+ if (!is_power_of_2(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]))
continue;
/*
@@ -375,7 +375,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine)
*
* -> 0 <= ss <= 3;
*/
- ss = ffs(INTEL_INFO(i915)->sseu.subslice_7eu[i]) - 1;
+ ss = ffs(RUNTIME_INFO(i915)->sseu.subslice_7eu[i]) - 1;
vals[i] = 3 - ss;
}
@@ -639,10 +639,9 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
wa_write_masked_or(wal, reg, val, val);
}
-static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915))
wa_write_or(wal,
@@ -666,11 +665,10 @@ static void gen9_gt_workarounds_init(struct drm_i915_private *i915)
BDW_DISABLE_HDC_INVALIDATION);
}
-static void skl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaDisableGafsUnitClkGating:skl */
wa_write_or(wal,
@@ -684,11 +682,10 @@ static void skl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaInPlaceDecompressionHang:bxt */
wa_write_or(wal,
@@ -696,11 +693,10 @@ static void bxt_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaDisableDynamicCreditSharing:kbl */
if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
@@ -719,16 +715,16 @@ static void kbl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void glk_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
}
-static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- gen9_gt_workarounds_init(i915);
+ gen9_gt_workarounds_init(i915, wal);
/* WaDisableGafsUnitClkGating:cfl */
wa_write_or(wal,
@@ -741,10 +737,10 @@ static void cfl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void wa_init_mcr(struct drm_i915_private *dev_priv)
+static void
+wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
- struct i915_wa_list *wal = &dev_priv->gt_wa_list;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
u32 mcr_slice_subslice_mask;
/*
@@ -804,11 +800,10 @@ static void wa_init_mcr(struct drm_i915_private *dev_priv)
intel_calculate_mcr_s_ss_select(dev_priv));
}
-static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- wa_init_mcr(i915);
+ wa_init_mcr(i915, wal);
/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
@@ -822,11 +817,10 @@ static void cnl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
-static void icl_gt_workarounds_init(struct drm_i915_private *i915)
+static void
+icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- wa_init_mcr(i915);
+ wa_init_mcr(i915, wal);
/* WaInPlaceDecompressionHang:icl */
wa_write_or(wal,
@@ -879,12 +873,9 @@ static void icl_gt_workarounds_init(struct drm_i915_private *i915)
GAMT_CHKN_DISABLE_L3_COH_PIPE);
}
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+static void
+gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
-
- wa_init_start(wal, "GT");
-
if (INTEL_GEN(i915) < 8)
return;
else if (IS_BROADWELL(i915))
@@ -892,22 +883,29 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
else if (IS_CHERRYVIEW(i915))
return;
else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915);
+ skl_gt_workarounds_init(i915, wal);
else if (IS_BROXTON(i915))
- bxt_gt_workarounds_init(i915);
+ bxt_gt_workarounds_init(i915, wal);
else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915);
+ kbl_gt_workarounds_init(i915, wal);
else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915);
+ glk_gt_workarounds_init(i915, wal);
else if (IS_COFFEELAKE(i915))
- cfl_gt_workarounds_init(i915);
+ cfl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
- cnl_gt_workarounds_init(i915);
+ cnl_gt_workarounds_init(i915, wal);
else if (IS_ICELAKE(i915))
- icl_gt_workarounds_init(i915);
+ icl_gt_workarounds_init(i915, wal);
else
MISSING_CASE(INTEL_GEN(i915));
+}
+void intel_gt_init_workarounds(struct drm_i915_private *i915)
+{
+ struct i915_wa_list *wal = &i915->gt_wa_list;
+
+ wa_init_start(wal, "GT");
+ gt_init_workarounds(i915, wal);
wa_init_finish(wal);
}
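With the workaround list passed explicitly, the same init path can populate a list other than i915->gt_wa_list, for example a throwaway reference list built by a selftest; a hedged sketch (gt_init_workarounds() is static, so this only works from code included into this file, and the list name is made up):

	static void example_build_ref_list(struct drm_i915_private *i915)
	{
		struct i915_wa_list ref = {};

		wa_init_start(&ref, "GT ref");	/* illustrative name */
		gt_init_workarounds(i915, &ref);
		wa_init_finish(&ref);
	}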
@@ -955,8 +953,6 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
intel_uncore_forcewake_put__locked(dev_priv, fw);
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
-
- DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
@@ -1126,14 +1122,12 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
i915_mmio_reg_offset(RING_NOPID(base)));
-
- DRM_DEBUG_DRIVER("Applied %u %s workarounds\n", wal->count, wal->name);
}
-static void rcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->wa_list;
if (IS_ICELAKE(i915)) {
/* This is not a Wa. Enable for better image quality */
@@ -1190,7 +1184,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_GEN9(i915) || IS_CANNONLAKE(i915)) {
+ if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
@@ -1211,7 +1205,7 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
}
- if (IS_GEN9(i915)) {
+ if (IS_GEN(i915, 9)) {
/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
wa_masked_en(wal,
GEN9_CSFE_CHICKEN1_RCS,
@@ -1237,10 +1231,10 @@ static void rcs_engine_wa_init(struct intel_engine_cs *engine)
}
}
-static void xcs_engine_wa_init(struct intel_engine_cs *engine)
+static void
+xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_wa_list *wal = &engine->wa_list;
/* WaKBLVECSSemaphoreWaitPoll:kbl */
if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
@@ -1250,6 +1244,18 @@ static void xcs_engine_wa_init(struct intel_engine_cs *engine)
}
}
+static void
+engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+ return;
+
+ if (engine->id == RCS)
+ rcs_engine_wa_init(engine, wal);
+ else
+ xcs_engine_wa_init(engine, wal);
+}
+
void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
@@ -1258,12 +1264,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
return;
wa_init_start(wal, engine->name);
-
- if (engine->id == RCS)
- rcs_engine_wa_init(engine);
- else
- xcs_engine_wa_init(engine);
-
+ engine_init_workarounds(engine, wal);
wa_init_finish(wal);
}
@@ -1273,11 +1274,5 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-static bool intel_engine_verify_workarounds(struct intel_engine_cs *engine,
- const char *from)
-{
- return wa_list_verify(engine->i915, &engine->wa_list, from);
-}
-
#include "selftests/intel_workarounds.c"
#endif
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 26c065c8d2c0..a9a2fa35876f 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
{
struct i915_request *rq;
struct i915_vma *batch;
- int flags = 0;
int err;
GEM_BUG_ON(!intel_engine_can_store_dword(engine));
@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
if (err)
return err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
batch = gpu_write_dw(vma, dword * sizeof(u32), value);
- if (IS_ERR(batch)) {
- err = PTR_ERR(batch);
- goto err_request;
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
}
err = i915_vma_move_to_active(batch, rq, 0);
@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
goto err_request;
i915_gem_object_set_active_reference(batch->obj);
- i915_vma_unpin(batch);
- i915_vma_close(batch);
- err = engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
- flags);
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_request;
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ 0);
+err_request:
if (err)
i915_request_skip(rq, err);
-
-err_request:
i915_request_add(rq);
+err_batch:
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
return err;
}
@@ -1450,7 +1449,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!HAS_FULL_48BIT_PPGTT(dev_priv)) {
+ if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1703,7 +1702,6 @@ int i915_gem_huge_page_mock_selftests(void)
};
struct drm_i915_private *dev_priv;
struct i915_hw_ppgtt *ppgtt;
- struct pci_dev *pdev;
int err;
dev_priv = mock_gem_device();
@@ -1713,9 +1711,6 @@ int i915_gem_huge_page_mock_selftests(void)
/* Pretend to be a device which supports the 48b PPGTT */
mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
- pdev = dev_priv->drm.pdev;
- dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(39));
-
mutex_lock(&dev_priv->drm.struct_mutex);
ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
if (IS_ERR(ppgtt)) {
@@ -1761,6 +1756,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
};
struct drm_file *file;
struct i915_gem_context *ctx;
+ intel_wakeref_t wakeref;
int err;
if (!HAS_PPGTT(dev_priv)) {
@@ -1776,7 +1772,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
ctx = live_context(dev_priv, file);
if (IS_ERR(ctx)) {
@@ -1790,7 +1786,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
err = i915_subtests(tests, ctx);
out_unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
mock_file_free(dev_priv, file);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index d0aa19d17653..e77b7ed449ae 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -16,9 +16,10 @@ static int switch_to_context(struct drm_i915_private *i915,
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *rq;
@@ -32,7 +33,7 @@ static int switch_to_context(struct drm_i915_private *i915,
i915_request_add(rq);
}
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return err;
}
@@ -65,7 +66,9 @@ static void trash_stolen(struct drm_i915_private *i915)
static void simulate_hibernate(struct drm_i915_private *i915)
{
- intel_runtime_pm_get(i915);
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(i915);
/*
* As a final sting in the tail, invalidate stolen. Under a real S4,
@@ -76,7 +79,7 @@ static void simulate_hibernate(struct drm_i915_private *i915)
*/
trash_stolen(i915);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
static int pm_prepare(struct drm_i915_private *i915)
@@ -93,39 +96,39 @@ static int pm_prepare(struct drm_i915_private *i915)
static void pm_suspend(struct drm_i915_private *i915)
{
- intel_runtime_pm_get(i915);
-
- i915_gem_suspend_gtt_mappings(i915);
- i915_gem_suspend_late(i915);
+ intel_wakeref_t wakeref;
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref) {
+ i915_gem_suspend_gtt_mappings(i915);
+ i915_gem_suspend_late(i915);
+ }
}
static void pm_hibernate(struct drm_i915_private *i915)
{
- intel_runtime_pm_get(i915);
+ intel_wakeref_t wakeref;
- i915_gem_suspend_gtt_mappings(i915);
+ with_intel_runtime_pm(i915, wakeref) {
+ i915_gem_suspend_gtt_mappings(i915);
- i915_gem_freeze(i915);
- i915_gem_freeze_late(i915);
-
- intel_runtime_pm_put(i915);
+ i915_gem_freeze(i915);
+ i915_gem_freeze_late(i915);
+ }
}
static void pm_resume(struct drm_i915_private *i915)
{
+ intel_wakeref_t wakeref;
+
/*
* Both suspend and hibernate follow the same wakeup path and assume
* that runtime-pm just works.
*/
- intel_runtime_pm_get(i915);
-
- intel_engines_sanitize(i915);
- i915_gem_sanitize(i915);
- i915_gem_resume(i915);
-
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref) {
+ intel_engines_sanitize(i915, false);
+ i915_gem_sanitize(i915);
+ i915_gem_resume(i915);
+ }
}
static int igt_gem_suspend(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index f7392c1ffe75..fd89a5a33c1a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -279,6 +279,7 @@ static int igt_gem_coherency(void *arg)
struct drm_i915_private *i915 = arg;
const struct igt_coherency_mode *read, *write, *over;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
unsigned long count, n;
u32 *offsets, *values;
int err = 0;
@@ -298,7 +299,7 @@ static int igt_gem_coherency(void *arg)
values = offsets + ncachelines;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for (over = igt_coherency_mode; over->name; over++) {
if (!over->set)
continue;
@@ -376,7 +377,7 @@ static int igt_gem_coherency(void *arg)
}
}
unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kfree(offsets);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7d82043aff10..e2c1f0bc2abe 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -27,6 +27,7 @@
#include "../i915_selftest.h"
#include "i915_random.h"
#include "igt_flush_test.h"
+#include "igt_live_test.h"
#include "mock_drm.h"
#include "mock_gem_device.h"
@@ -34,84 +35,6 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
-struct live_test {
- struct drm_i915_private *i915;
- const char *func;
- const char *name;
-
- unsigned int reset_global;
- unsigned int reset_engine[I915_NUM_ENGINES];
-};
-
-static int begin_live_test(struct live_test *t,
- struct drm_i915_private *i915,
- const char *func,
- const char *name)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err;
-
- t->i915 = i915;
- t->func = func;
- t->name = name;
-
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err) {
- pr_err("%s(%s): failed to idle before, with err=%d!",
- func, name, err);
- return err;
- }
-
- i915->gpu_error.missed_irq_rings = 0;
- t->reset_global = i915_reset_count(&i915->gpu_error);
-
- for_each_engine(engine, i915, id)
- t->reset_engine[id] =
- i915_reset_engine_count(&i915->gpu_error, engine);
-
- return 0;
-}
-
-static int end_live_test(struct live_test *t)
-{
- struct drm_i915_private *i915 = t->i915;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- return -EIO;
-
- if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
- pr_err("%s(%s): GPU was reset %d times!\n",
- t->func, t->name,
- i915_reset_count(&i915->gpu_error) - t->reset_global);
- return -EIO;
- }
-
- for_each_engine(engine, i915, id) {
- if (t->reset_engine[id] ==
- i915_reset_engine_count(&i915->gpu_error, engine))
- continue;
-
- pr_err("%s(%s): engine '%s' was reset %d times!\n",
- t->func, t->name, engine->name,
- i915_reset_engine_count(&i915->gpu_error, engine) -
- t->reset_engine[id]);
- return -EIO;
- }
-
- if (i915->gpu_error.missed_irq_rings) {
- pr_err("%s(%s): Missed interrupts on engines %lx\n",
- t->func, t->name, i915->gpu_error.missed_irq_rings);
- return -EIO;
- }
-
- return 0;
-}
-
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
@@ -119,8 +42,9 @@ static int live_nop_switch(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context **ctx;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct drm_file *file;
- struct live_test t;
unsigned long n;
int err = -ENODEV;
@@ -140,7 +64,7 @@ static int live_nop_switch(void *arg)
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
@@ -184,7 +108,7 @@ static int live_nop_switch(void *arg)
pr_info("Populated %d contexts on %s in %lluns\n",
nctx, engine->name, ktime_to_ns(times[1] - times[0]));
- err = begin_live_test(&t, i915, __func__, engine->name);
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
@@ -232,7 +156,7 @@ static int live_nop_switch(void *arg)
break;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
if (err)
goto out_unlock;
@@ -243,7 +167,7 @@ static int live_nop_switch(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
return err;
@@ -553,10 +477,10 @@ static int igt_ctx_exec(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj = NULL;
unsigned long ncontexts, ndwords, dw;
+ struct igt_live_test t;
struct drm_file *file;
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- struct live_test t;
int err = -ENODEV;
/*
@@ -574,7 +498,7 @@ static int igt_ctx_exec(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -593,6 +517,8 @@ static int igt_ctx_exec(void *arg)
}
for_each_engine(engine, i915, id) {
+ intel_wakeref_t wakeref;
+
if (!engine->context_size)
continue; /* No logical context support in HW */
@@ -607,9 +533,9 @@ static int igt_ctx_exec(void *arg)
}
}
- intel_runtime_pm_get(i915);
- err = gpu_fill(obj, ctx, engine, dw);
- intel_runtime_pm_put(i915);
+ err = 0;
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -627,7 +553,7 @@ static int igt_ctx_exec(void *arg)
ncontexts++;
}
pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
- ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+ ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
@@ -642,7 +568,7 @@ static int igt_ctx_exec(void *arg)
}
out_unlock:
- if (end_live_test(&t))
+ if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -657,11 +583,11 @@ static int igt_ctx_readonly(void *arg)
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
unsigned long ndwords, dw;
+ struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
IGT_TIMEOUT(end_time);
LIST_HEAD(objects);
- struct live_test t;
int err = -ENODEV;
/*
@@ -676,7 +602,7 @@ static int igt_ctx_readonly(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -699,6 +625,8 @@ static int igt_ctx_readonly(void *arg)
unsigned int id;
for_each_engine(engine, i915, id) {
+ intel_wakeref_t wakeref;
+
if (!intel_engine_can_store_dword(engine))
continue;
@@ -713,9 +641,9 @@ static int igt_ctx_readonly(void *arg)
i915_gem_object_set_readonly(obj);
}
- intel_runtime_pm_get(i915);
- err = gpu_fill(obj, ctx, engine, dw);
- intel_runtime_pm_put(i915);
+ err = 0;
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
if (err) {
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
@@ -732,7 +660,7 @@ static int igt_ctx_readonly(void *arg)
}
}
pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, INTEL_INFO(i915)->num_rings);
+ ndwords, RUNTIME_INFO(i915)->num_rings);
dw = 0;
list_for_each_entry(obj, &objects, st_link) {
@@ -752,7 +680,7 @@ static int igt_ctx_readonly(void *arg)
}
out_unlock:
- if (end_live_test(&t))
+ if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -976,10 +904,11 @@ static int igt_vm_isolation(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_gem_context *ctx_a, *ctx_b;
struct intel_engine_cs *engine;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
unsigned long count;
- struct live_test t;
unsigned int id;
u64 vm_total;
int err;
@@ -998,7 +927,7 @@ static int igt_vm_isolation(void *arg)
mutex_lock(&i915->drm.struct_mutex);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -1022,7 +951,7 @@ static int igt_vm_isolation(void *arg)
GEM_BUG_ON(ctx_b->ppgtt->vm.total != vm_total);
vm_total -= I915_GTT_PAGE_SIZE;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
count = 0;
for_each_engine(engine, i915, id) {
@@ -1064,12 +993,12 @@ static int igt_vm_isolation(void *arg)
count += this;
}
pr_info("Checked %lu scratch offsets across %d engines\n",
- count, INTEL_INFO(i915)->num_rings);
+ count, RUNTIME_INFO(i915)->num_rings);
out_rpm:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
out_unlock:
- if (end_live_test(&t))
+ if (igt_live_test_end(&t))
err = -EIO;
mutex_unlock(&i915->drm.struct_mutex);
@@ -1165,6 +1094,7 @@ static int igt_switch_to_kernel_context(void *arg)
struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err;
/*
@@ -1175,7 +1105,7 @@ static int igt_switch_to_kernel_context(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
ctx = kernel_context(i915);
if (IS_ERR(ctx)) {
@@ -1200,7 +1130,7 @@ out_unlock:
if (igt_flush_test(i915, I915_WAIT_LOCKED))
err = -EIO;
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
kernel_context_close(ctx);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 4365979d8222..d0553bc69705 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -29,11 +29,23 @@
#include "mock_drm.h"
#include "mock_gem_device.h"
-static int populate_ggtt(struct drm_i915_private *i915)
+static void quirk_add(struct drm_i915_gem_object *obj,
+ struct list_head *objects)
{
+ /* quirk is only for live tiled objects, use it to declare ownership */
+ GEM_BUG_ON(obj->mm.quirked);
+ obj->mm.quirked = true;
+ list_add(&obj->st_link, objects);
+}
+
+static int populate_ggtt(struct drm_i915_private *i915,
+ struct list_head *objects)
+{
+ unsigned long unbound, bound, count;
struct drm_i915_gem_object *obj;
u64 size;
+ count = 0;
for (size = 0;
size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) {
@@ -43,17 +55,32 @@ static int populate_ggtt(struct drm_i915_private *i915)
if (IS_ERR(obj))
return PTR_ERR(obj);
+ quirk_add(obj, objects);
+
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma))
return PTR_ERR(vma);
+
+ count++;
}
- if (!list_empty(&i915->mm.unbound_list)) {
- size = 0;
- list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
- size++;
+ unbound = 0;
+ list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
+ if (obj->mm.quirked)
+ unbound++;
+ if (unbound) {
+ pr_err("%s: Found %lu objects unbound, expected %u!\n",
+ __func__, unbound, 0);
+ return -EINVAL;
+ }
- pr_err("Found %lld objects unbound!\n", size);
+ bound = 0;
+ list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
+ if (obj->mm.quirked)
+ bound++;
+ if (bound != count) {
+ pr_err("%s: Found %lu objects bound, expected %lu!\n",
+ __func__, bound, count);
return -EINVAL;
}
@@ -70,18 +97,20 @@ static void unpin_ggtt(struct drm_i915_private *i915)
struct i915_vma *vma;
list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
- i915_vma_unpin(vma);
+ if (vma->obj->mm.quirked)
+ i915_vma_unpin(vma);
}
-static void cleanup_objects(struct drm_i915_private *i915)
+static void cleanup_objects(struct drm_i915_private *i915,
+ struct list_head *list)
{
struct drm_i915_gem_object *obj, *on;
- list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, mm.link)
- i915_gem_object_put(obj);
-
- list_for_each_entry_safe(obj, on, &i915->mm.bound_list, mm.link)
+ list_for_each_entry_safe(obj, on, list, st_link) {
+ GEM_BUG_ON(!obj->mm.quirked);
+ obj->mm.quirked = false;
i915_gem_object_put(obj);
+ }
mutex_unlock(&i915->drm.struct_mutex);
@@ -94,11 +123,12 @@ static int igt_evict_something(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_ggtt *ggtt = &i915->ggtt;
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict one. */
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -127,7 +157,7 @@ static int igt_evict_something(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -136,13 +166,14 @@ static int igt_overcommit(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and then try to pin one more.
* We expect it to fail.
*/
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -152,6 +183,8 @@ static int igt_overcommit(void *arg)
goto cleanup;
}
+ quirk_add(obj, &objects);
+
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
@@ -160,7 +193,7 @@ static int igt_overcommit(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -172,11 +205,12 @@ static int igt_evict_for_vma(void *arg)
.start = 0,
.size = 4096,
};
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict a range. */
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -199,7 +233,7 @@ static int igt_evict_for_vma(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -222,6 +256,7 @@ static int igt_evict_for_cache_color(void *arg)
};
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
+ LIST_HEAD(objects);
int err;
/* Currently the use of color_adjust is limited to cache domains within
@@ -237,6 +272,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
I915_GTT_PAGE_SIZE | flags);
@@ -252,6 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
goto cleanup;
}
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ quirk_add(obj, &objects);
/* Neighbouring; same colour - should fit */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -287,7 +324,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup:
unpin_ggtt(i915);
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
ggtt->vm.mm.color_adjust = NULL;
return err;
}
@@ -296,11 +333,12 @@ static int igt_evict_vm(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_ggtt *ggtt = &i915->ggtt;
+ LIST_HEAD(objects);
int err;
/* Fill the GGTT with pinned objects and try to evict everything. */
- err = populate_ggtt(i915);
+ err = populate_ggtt(i915, &objects);
if (err)
goto cleanup;
@@ -322,7 +360,7 @@ static int igt_evict_vm(void *arg)
}
cleanup:
- cleanup_objects(i915);
+ cleanup_objects(i915, &objects);
return err;
}
@@ -336,6 +374,7 @@ static int igt_evict_contexts(void *arg)
struct drm_mm_node node;
struct reserved *next;
} *reserved = NULL;
+ intel_wakeref_t wakeref;
struct drm_mm_node hole;
unsigned long count;
int err;
@@ -355,7 +394,7 @@ static int igt_evict_contexts(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
/* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole));
@@ -400,8 +439,10 @@ static int igt_evict_contexts(void *arg)
struct drm_file *file;
file = mock_file(i915);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ break;
+ }
count = 0;
mutex_lock(&i915->drm.struct_mutex);
@@ -464,7 +505,7 @@ out_locked:
}
if (drm_mm_node_allocated(&hole))
drm_mm_remove_node(&hole);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -480,14 +521,17 @@ int i915_gem_evict_mock_selftests(void)
SUBTEST(igt_overcommit),
};
struct drm_i915_private *i915;
- int err;
+ intel_wakeref_t wakeref;
+ int err = 0;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
+ with_intel_runtime_pm(i915, wakeref)
+ err = i915_subtests(tests, i915);
+
mutex_unlock(&i915->drm.struct_mutex);
drm_dev_put(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index a9ed0ecc94e2..06bde4a273cb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -275,6 +275,7 @@ static int lowlevel_hole(struct drm_i915_private *i915,
for (n = 0; n < count; n++) {
u64 addr = hole_start + order[n] * BIT_ULL(size);
+ intel_wakeref_t wakeref;
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
@@ -293,9 +294,9 @@ static int lowlevel_hole(struct drm_i915_private *i915,
mock_vma.node.size = BIT_ULL(size);
mock_vma.node.start = addr;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
}
count = n;
@@ -1144,6 +1145,7 @@ static int igt_ggtt_page(void *arg)
struct drm_i915_private *i915 = arg;
struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
struct drm_mm_node tmp;
unsigned int *order, n;
int err;
@@ -1169,7 +1171,7 @@ static int igt_ggtt_page(void *arg)
if (err)
goto out_unpin;
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE;
@@ -1216,7 +1218,7 @@ static int igt_ggtt_page(void *arg)
kfree(order);
out_remove:
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
drm_mm_remove_node(&tmp);
out_unpin:
i915_gem_object_unpin_pages(obj);
@@ -1265,27 +1267,35 @@ static int exercise_mock(struct drm_i915_private *i915,
static int igt_mock_fill(void *arg)
{
- return exercise_mock(arg, fill_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, fill_hole);
}
static int igt_mock_walk(void *arg)
{
- return exercise_mock(arg, walk_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, walk_hole);
}
static int igt_mock_pot(void *arg)
{
- return exercise_mock(arg, pot_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, pot_hole);
}
static int igt_mock_drunk(void *arg)
{
- return exercise_mock(arg, drunk_hole);
+ struct i915_ggtt *ggtt = arg;
+
+ return exercise_mock(ggtt->vm.i915, drunk_hole);
}
static int igt_gtt_reserve(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
LIST_HEAD(objects);
u64 total;
@@ -1298,11 +1308,12 @@ static int igt_gtt_reserve(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- total += 2*I915_GTT_PAGE_SIZE) {
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1316,20 +1327,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1347,11 +1358,12 @@ static int igt_gtt_reserve(void *arg)
/* Now we start forcing evictions */
for (total = I915_GTT_PAGE_SIZE;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- total += 2*I915_GTT_PAGE_SIZE) {
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1365,20 +1377,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
total,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1399,7 +1411,7 @@ static int igt_gtt_reserve(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1411,18 +1423,18 @@ static int igt_gtt_reserve(void *arg)
goto out;
}
- offset = random_offset(0, i915->ggtt.vm.total,
+ offset = random_offset(0, ggtt->vm.total,
2*I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT);
- err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
obj->base.size,
offset,
obj->cache_level,
0);
if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1448,7 +1460,7 @@ out:
static int igt_gtt_insert(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
struct drm_i915_gem_object *obj, *on;
struct drm_mm_node tmp = {};
const struct invalid_insert {
@@ -1457,8 +1469,8 @@ static int igt_gtt_insert(void *arg)
u64 start, end;
} invalid_insert[] = {
{
- i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
- 0, i915->ggtt.vm.total,
+ ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
+ 0, ggtt->vm.total,
},
{
2*I915_GTT_PAGE_SIZE, 0,
@@ -1488,7 +1500,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) {
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
+ err = i915_gem_gtt_insert(&ggtt->vm, &tmp,
ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE,
ii->start, ii->end,
@@ -1503,11 +1515,12 @@ static int igt_gtt_insert(void *arg)
/* Start by filling the GGTT */
for (total = 0;
- total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
+ total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1521,15 +1534,15 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
0);
if (err == -ENOSPC) {
/* maxed out the GGTT space */
@@ -1538,7 +1551,7 @@ static int igt_gtt_insert(void *arg)
}
if (err) {
pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1550,7 +1563,7 @@ static int igt_gtt_insert(void *arg)
list_for_each_entry(obj, &objects, st_link) {
struct i915_vma *vma;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1570,7 +1583,7 @@ static int igt_gtt_insert(void *arg)
struct i915_vma *vma;
u64 offset;
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
@@ -1585,13 +1598,13 @@ static int igt_gtt_insert(void *arg)
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1607,11 +1620,12 @@ static int igt_gtt_insert(void *arg)
/* And then force evictions */
for (total = 0;
- total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
- total += 2*I915_GTT_PAGE_SIZE) {
+ total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
+ total += 2 * I915_GTT_PAGE_SIZE) {
struct i915_vma *vma;
- obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915,
+ 2 * I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto out;
@@ -1625,19 +1639,19 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects);
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto out;
}
- err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
+ err = i915_gem_gtt_insert(&ggtt->vm, &vma->node,
obj->base.size, 0, obj->cache_level,
- 0, i915->ggtt.vm.total,
+ 0, ggtt->vm.total,
0);
if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
- total, i915->ggtt.vm.total, err);
+ total, ggtt->vm.total, err);
goto out;
}
track_vma_bind(vma);
@@ -1664,17 +1678,25 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
+ struct i915_ggtt ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
+ mock_init_ggtt(i915, &ggtt);
+
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
+ err = i915_subtests(tests, &ggtt);
+ mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(i915);
+
+ mock_fini_ggtt(&ggtt);
drm_dev_put(&i915->drm);
+
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index c3999dd2021e..395ae878e0f7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -238,6 +238,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
u32 *cpu;
GEM_BUG_ON(view.partial.size > nreal);
+ cond_resched();
err = i915_gem_object_set_to_gtt_domain(obj, true);
if (err) {
@@ -307,6 +308,7 @@ static int igt_partial_tiling(void *arg)
const unsigned int nreal = 1 << 12; /* largest tile row x2 */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
int tiling;
int err;
@@ -332,7 +334,7 @@ static int igt_partial_tiling(void *arg)
}
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (1) {
IGT_TIMEOUT(end);
@@ -443,7 +445,7 @@ next_tiling: ;
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_object_unpin_pages(obj);
out:
@@ -505,11 +507,13 @@ static void disable_retire_worker(struct drm_i915_private *i915)
mutex_lock(&i915->drm.struct_mutex);
if (!i915->gt.active_requests++) {
- intel_runtime_pm_get(i915);
- i915_gem_unpark(i915);
- intel_runtime_pm_put(i915);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(i915, wakeref)
+ i915_gem_unpark(i915);
}
mutex_unlock(&i915->drm.struct_mutex);
+
cancel_delayed_work_sync(&i915->gt.retire_work);
cancel_delayed_work_sync(&i915->gt.idle_work);
}
@@ -577,6 +581,8 @@ static int igt_mmap_offset_exhaustion(void *arg)
/* Now fill with busy dead objects that we expect to reap */
for (loop = 0; loop < 3; loop++) {
+ intel_wakeref_t wakeref;
+
if (i915_terminally_wedged(&i915->gpu_error))
break;
@@ -586,10 +592,10 @@ static int igt_mmap_offset_exhaustion(void *arg)
goto out;
}
+ err = 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
- err = make_obj_busy(obj);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ err = make_obj_busy(obj);
mutex_unlock(&i915->drm.struct_mutex);
if (err) {
pr_err("[loop %d] Failed to busy the object\n", loop);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 07e557815308..4d4b86b5fa11 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -25,6 +25,7 @@
#include <linux/prime_numbers.h>
#include "../i915_selftest.h"
+#include "igt_live_test.h"
#include "mock_context.h"
#include "mock_gem_device.h"
@@ -255,84 +256,27 @@ int i915_request_mock_selftests(void)
SUBTEST(igt_request_rewind),
};
struct drm_i915_private *i915;
- int err;
+ intel_wakeref_t wakeref;
+ int err = 0;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- err = i915_subtests(tests, i915);
+ with_intel_runtime_pm(i915, wakeref)
+ err = i915_subtests(tests, i915);
+
drm_dev_put(&i915->drm);
return err;
}
-struct live_test {
- struct drm_i915_private *i915;
- const char *func;
- const char *name;
-
- unsigned int reset_count;
-};
-
-static int begin_live_test(struct live_test *t,
- struct drm_i915_private *i915,
- const char *func,
- const char *name)
-{
- int err;
-
- t->i915 = i915;
- t->func = func;
- t->name = name;
-
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err) {
- pr_err("%s(%s): failed to idle before, with err=%d!",
- func, name, err);
- return err;
- }
-
- i915->gpu_error.missed_irq_rings = 0;
- t->reset_count = i915_reset_count(&i915->gpu_error);
-
- return 0;
-}
-
-static int end_live_test(struct live_test *t)
-{
- struct drm_i915_private *i915 = t->i915;
-
- i915_retire_requests(i915);
-
- if (wait_for(intel_engines_are_idle(i915), 10)) {
- pr_err("%s(%s): GPU not idle\n", t->func, t->name);
- return -EIO;
- }
-
- if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
- pr_err("%s(%s): GPU was reset %d times!\n",
- t->func, t->name,
- i915_reset_count(&i915->gpu_error) - t->reset_count);
- return -EIO;
- }
-
- if (i915->gpu_error.missed_irq_rings) {
- pr_err("%s(%s): Missed interrupts on engines %lx\n",
- t->func, t->name, i915->gpu_error.missed_irq_rings);
- return -EIO;
- }
-
- return 0;
-}
-
static int live_nop_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct live_test t;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
unsigned int id;
int err = -ENODEV;
@@ -342,7 +286,7 @@ static int live_nop_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
for_each_engine(engine, i915, id) {
struct i915_request *request = NULL;
@@ -350,7 +294,7 @@ static int live_nop_request(void *arg)
IGT_TIMEOUT(end_time);
ktime_t times[2] = {};
- err = begin_live_test(&t, i915, __func__, engine->name);
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_unlock;
@@ -392,7 +336,7 @@ static int live_nop_request(void *arg)
break;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
if (err)
goto out_unlock;
@@ -403,7 +347,7 @@ static int live_nop_request(void *arg)
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -478,7 +422,8 @@ static int live_empty_request(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
- struct live_test t;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct i915_vma *batch;
unsigned int id;
int err = 0;
@@ -489,7 +434,7 @@ static int live_empty_request(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
batch = empty_batch(i915);
if (IS_ERR(batch)) {
@@ -503,7 +448,7 @@ static int live_empty_request(void *arg)
unsigned long n, prime;
ktime_t times[2] = {};
- err = begin_live_test(&t, i915, __func__, engine->name);
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
if (err)
goto out_batch;
@@ -539,7 +484,7 @@ static int live_empty_request(void *arg)
break;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
if (err)
goto out_batch;
@@ -553,7 +498,7 @@ out_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -637,8 +582,9 @@ static int live_all_engines(void *arg)
struct drm_i915_private *i915 = arg;
struct intel_engine_cs *engine;
struct i915_request *request[I915_NUM_ENGINES];
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
struct i915_vma *batch;
- struct live_test t;
unsigned int id;
int err;
@@ -648,9 +594,9 @@ static int live_all_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -722,7 +668,7 @@ static int live_all_engines(void *arg)
request[id] = NULL;
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
out_request:
for_each_engine(engine, i915, id)
@@ -731,7 +677,7 @@ out_request:
i915_vma_unpin(batch);
i915_vma_put(batch);
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -742,7 +688,8 @@ static int live_sequential_engines(void *arg)
struct i915_request *request[I915_NUM_ENGINES] = {};
struct i915_request *prev = NULL;
struct intel_engine_cs *engine;
- struct live_test t;
+ intel_wakeref_t wakeref;
+ struct igt_live_test t;
unsigned int id;
int err;
@@ -753,9 +700,9 @@ static int live_sequential_engines(void *arg)
*/
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
- err = begin_live_test(&t, i915, __func__, "");
+ err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
@@ -838,7 +785,7 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(!i915_request_completed(request[id]));
}
- err = end_live_test(&t);
+ err = igt_live_test_end(&t);
out_request:
for_each_engine(engine, i915, id) {
@@ -860,7 +807,7 @@ out_request:
i915_request_put(request[id]);
}
out_unlock:
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index ffa74290e054..f0a32edfb9b1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -28,6 +28,7 @@
#include "mock_gem_device.h"
#include "mock_context.h"
+#include "mock_gtt.h"
static bool assert_vma(struct i915_vma *vma,
struct drm_i915_gem_object *obj,
@@ -141,7 +142,8 @@ static int create_vmas(struct drm_i915_private *i915,
static int igt_vma_create(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
+ struct drm_i915_private *i915 = ggtt->vm.i915;
struct drm_i915_gem_object *obj, *on;
struct i915_gem_context *ctx, *cn;
unsigned long num_obj, num_ctx;
@@ -245,7 +247,7 @@ static bool assert_pin_einval(const struct i915_vma *vma,
static int igt_vma_pin1(void *arg)
{
- struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = arg;
const struct pin_mode modes[] = {
#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
@@ -256,30 +258,30 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
-
- VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
- VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
- INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
+
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | ggtt->mappable_end),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL),
VALID(8192, PIN_GLOBAL),
- VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
- NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
- VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
- VALID(i915->ggtt.vm.total, PIN_GLOBAL),
- NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
+ VALID(ggtt->mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(ggtt->mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
+ NOSPACE(ggtt->mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(ggtt->vm.total - 4096, PIN_GLOBAL),
+ VALID(ggtt->vm.total, PIN_GLOBAL),
+ NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
- INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
- INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (ggtt->mappable_end - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
- VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
/* Misusing BIAS is a programming error (it is not controllable
@@ -287,10 +289,10 @@ static int igt_vma_pin1(void *arg)
* However, the tests are still quite interesting for checking
* variable start, end and size.
*/
- NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
- NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
- NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
- NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
+ NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | ggtt->mappable_end),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total),
+ NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (ggtt->mappable_end - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
#endif
{ },
#undef NOSPACE
@@ -306,13 +308,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions.
*/
- GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
+ GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm));
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = checked_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma))
goto out;
@@ -403,8 +405,8 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.vm;
+ struct i915_ggtt *ggtt = arg;
+ struct i915_address_space *vm = &ggtt->vm;
struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 },
@@ -431,7 +433,7 @@ static int igt_vma_rotate(void *arg)
* that the page layout within the rotated VMA match our expectations.
*/
- obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
if (IS_ERR(obj))
goto out;
@@ -602,8 +604,8 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg)
{
- struct drm_i915_private *i915 = arg;
- struct i915_address_space *vm = &i915->ggtt.vm;
+ struct i915_ggtt *ggtt = arg;
+ struct i915_address_space *vm = &ggtt->vm;
const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj;
const struct phase {
@@ -621,7 +623,7 @@ static int igt_vma_partial(void *arg)
* we are returned the same VMA when we later request the same range.
*/
- obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE);
if (IS_ERR(obj))
goto out;
@@ -723,17 +725,24 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
+ struct i915_ggtt ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
+ mock_init_ggtt(i915, &ggtt);
+
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, i915);
+ err = i915_subtests(tests, &ggtt);
+ mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_drain_freed_objects(i915);
+
+ mock_fini_ggtt(&ggtt);
drm_dev_put(&i915->drm);
+
return err;
}
-
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c
new file mode 100644
index 000000000000..5deb485fb942
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c
@@ -0,0 +1,85 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "../i915_drv.h"
+
+#include "../i915_selftest.h"
+#include "igt_flush_test.h"
+#include "igt_live_test.h"
+
+int igt_live_test_begin(struct igt_live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ t->i915 = i915;
+ t->func = func;
+ t->name = name;
+
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err) {
+ pr_err("%s(%s): failed to idle before, with err=%d!",
+ func, name, err);
+ return err;
+ }
+
+ i915->gpu_error.missed_irq_rings = 0;
+ t->reset_global = i915_reset_count(&i915->gpu_error);
+
+ for_each_engine(engine, i915, id)
+ t->reset_engine[id] =
+ i915_reset_engine_count(&i915->gpu_error, engine);
+
+ return 0;
+}
+
+int igt_live_test_end(struct igt_live_test *t)
+{
+ struct drm_i915_private *i915 = t->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
+
+ if (t->reset_global != i915_reset_count(&i915->gpu_error)) {
+ pr_err("%s(%s): GPU was reset %d times!\n",
+ t->func, t->name,
+ i915_reset_count(&i915->gpu_error) - t->reset_global);
+ return -EIO;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (t->reset_engine[id] ==
+ i915_reset_engine_count(&i915->gpu_error, engine))
+ continue;
+
+ pr_err("%s(%s): engine '%s' was reset %d times!\n",
+ t->func, t->name, engine->name,
+ i915_reset_engine_count(&i915->gpu_error, engine) -
+ t->reset_engine[id]);
+ return -EIO;
+ }
+
+ if (i915->gpu_error.missed_irq_rings) {
+ pr_err("%s(%s): Missed interrupts on engines %lx\n",
+ t->func, t->name, i915->gpu_error.missed_irq_rings);
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.h b/drivers/gpu/drm/i915/selftests/igt_live_test.h
new file mode 100644
index 000000000000..c0e9f99d50de
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/igt_live_test.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef IGT_LIVE_TEST_H
+#define IGT_LIVE_TEST_H
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_live_test {
+ struct drm_i915_private *i915;
+ const char *func;
+ const char *name;
+
+ unsigned int reset_global;
+ unsigned int reset_engine[I915_NUM_ENGINES];
+};
+
+/*
+ * Flush the GPU state before and after the test to ensure that no residual
+ * code is running on the GPU that may affect this test. Also compare the
+ * state before and after the test and alert if it unexpectedly changes,
+ * e.g. if the GPU was reset.
+ */
+int igt_live_test_begin(struct igt_live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name);
+int igt_live_test_end(struct igt_live_test *t);
+
+#endif /* IGT_LIVE_TEST_H */
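The new helper is used by bracketing a live test with igt_live_test_begin()/igt_live_test_end(), as the earlier hunks in i915_gem_context.c and i915_request.c now do. A condensed usage sketch under struct_mutex, with error handling trimmed and the body in the middle left as a placeholder:

	static int live_test_usage_sketch(struct drm_i915_private *i915,
					  struct intel_engine_cs *engine)
	{
		struct igt_live_test t;
		int err;

		mutex_lock(&i915->drm.struct_mutex);

		/* Idles the GPU and records the global and per-engine reset counts. */
		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err == 0) {
			/* ... queue and wait for the workload under test ... */

			/*
			 * Flushes outstanding work and returns -EIO if a GPU or
			 * engine reset, or a missed interrupt, was seen meanwhile.
			 */
			err = igt_live_test_end(&t);
		}

		mutex_unlock(&i915->drm.struct_mutex);
		return err;
	}
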
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 8cd34f6e6859..0e70df0230b8 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws,
return hws->node.start + seqno_offset(rq->fence.context);
}
-static int emit_recurse_batch(struct igt_spinner *spin,
- struct i915_request *rq,
- u32 arbitration_command)
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
{
- struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+ int err;
+
+ err = i915_vma_move_to_active(vma, rq, flags);
+ if (err)
+ return err;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ return 0;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ u32 arbitration_command)
+{
+ struct i915_address_space *vm = &ctx->ppgtt->vm;
+ struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
u32 *batch;
int err;
vma = i915_vma_instance(spin->obj, vm, NULL);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return ERR_CAST(vma);
hws = i915_vma_instance(spin->hws, vm, NULL);
if (IS_ERR(hws))
- return PTR_ERR(hws);
+ return ERR_CAST(hws);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- return err;
+ return ERR_PTR(err);
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
goto unpin_vma;
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
}
- err = i915_vma_move_to_active(hws, rq, 0);
+ err = move_to_active(vma, rq, 0);
if (err)
- goto unpin_hws;
+ goto cancel_rq;
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
+ err = move_to_active(hws, rq, 0);
+ if (err)
+ goto cancel_rq;
batch = spin->batch;
@@ -127,35 +144,18 @@ static int emit_recurse_batch(struct igt_spinner *spin,
i915_gem_chipset_flush(spin->i915);
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+ err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+cancel_rq:
+ if (err) {
+ i915_request_skip(rq, err);
+ i915_request_add(rq);
+ }
unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
- return err;
-}
-
-struct i915_request *
-igt_spinner_create_request(struct igt_spinner *spin,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- u32 arbitration_command)
-{
- struct i915_request *rq;
- int err;
-
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(spin, rq, arbitration_command);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
+ return err ? ERR_PTR(err) : rq;
}
static u32
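The reordering above (mirrored in intel_hangcheck.c below) allocates the request only after both VMAs are pinned, so any failure past i915_request_alloc() has to retire the request before the VMAs are unpinned. Reduced to a skeleton, with names taken from the hunk above and the ellipsis standing in for the emission steps:

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto unpin_hws;			/* no request yet: just unpin */
	}

	err = move_to_active(vma, rq, 0);
	if (err)
		goto cancel_rq;
	/* ... move hws to active, emit the spinning batch ... */

cancel_rq:
	if (err) {
		i915_request_skip(rq, err);	/* record the error, skip the payload */
		i915_request_add(rq);		/* and let the request retire normally */
	}
unpin_hws:
	i915_vma_unpin(hws);
	i915_vma_unpin(vma);

	return err ? ERR_PTR(err) : rq;
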
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 32cba4cae31a..c5e0a0e98fcb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -137,12 +137,13 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client)
static int igt_guc_clients(void *args)
{
struct drm_i915_private *dev_priv = args;
+ intel_wakeref_t wakeref;
struct intel_guc *guc;
int err = 0;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -225,7 +226,7 @@ out:
guc_clients_create(guc);
guc_clients_enable(guc);
unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
@@ -238,13 +239,14 @@ unlock:
static int igt_guc_doorbells(void *arg)
{
struct drm_i915_private *dev_priv = arg;
+ intel_wakeref_t wakeref;
struct intel_guc *guc;
int i, err = 0;
u16 db_id;
GEM_BUG_ON(!HAS_GUC(dev_priv));
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_get(dev_priv);
+ wakeref = intel_runtime_pm_get(dev_priv);
guc = &dev_priv->guc;
if (!guc) {
@@ -337,7 +339,7 @@ out:
guc_client_free(clients[i]);
}
unlock:
- intel_runtime_pm_put(dev_priv);
+ intel_runtime_pm_put(dev_priv, wakeref);
mutex_unlock(&dev_priv->drm.struct_mutex);
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 5910da3e7d79..12550b55c42f 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -103,52 +103,87 @@ static u64 hws_address(const struct i915_vma *hws,
return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
}
-static int emit_recurse_batch(struct hang *h,
- struct i915_request *rq)
+static int move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ unsigned int flags)
+{
+ int err;
+
+ err = i915_vma_move_to_active(vma, rq, flags);
+ if (err)
+ return err;
+
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ return 0;
+}
+
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm =
- rq->gem_context->ppgtt ?
- &rq->gem_context->ppgtt->vm :
- &i915->ggtt.vm;
+ h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
int err;
+ if (i915_gem_object_is_active(h->obj)) {
+ struct drm_i915_gem_object *obj;
+ void *vaddr;
+
+ obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vaddr = i915_gem_object_pin_map(obj,
+ i915_coherent_map_type(h->i915));
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ h->obj = obj;
+ h->batch = vaddr;
+ }
+
vma = i915_vma_instance(h->obj, vm, NULL);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return ERR_CAST(vma);
hws = i915_vma_instance(h->hws, vm, NULL);
if (IS_ERR(hws))
- return PTR_ERR(hws);
+ return ERR_CAST(hws);
err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
- return err;
+ return ERR_PTR(err);
err = i915_vma_pin(hws, 0, 0, PIN_USER);
if (err)
goto unpin_vma;
- err = i915_vma_move_to_active(vma, rq, 0);
- if (err)
+ rq = i915_request_alloc(engine, h->ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
goto unpin_hws;
-
- if (!i915_gem_object_has_active_reference(vma->obj)) {
- i915_gem_object_get(vma->obj);
- i915_gem_object_set_active_reference(vma->obj);
}
- err = i915_vma_move_to_active(hws, rq, 0);
+ err = move_to_active(vma, rq, 0);
if (err)
- goto unpin_hws;
+ goto cancel_rq;
- if (!i915_gem_object_has_active_reference(hws->obj)) {
- i915_gem_object_get(hws->obj);
- i915_gem_object_set_active_reference(hws->obj);
- }
+ err = move_to_active(hws, rq, 0);
+ if (err)
+ goto cancel_rq;
batch = h->batch;
if (INTEL_GEN(i915) >= 8) {
@@ -213,52 +248,16 @@ static int emit_recurse_batch(struct hang *h,
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+cancel_rq:
+ if (err) {
+ i915_request_skip(rq, err);
+ i915_request_add(rq);
+ }
unpin_hws:
i915_vma_unpin(hws);
unpin_vma:
i915_vma_unpin(vma);
- return err;
-}
-
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
- struct i915_request *rq;
- int err;
-
- if (i915_gem_object_is_active(h->obj)) {
- struct drm_i915_gem_object *obj;
- void *vaddr;
-
- obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- vaddr = i915_gem_object_pin_map(obj,
- i915_coherent_map_type(h->i915));
- if (IS_ERR(vaddr)) {
- i915_gem_object_put(obj);
- return ERR_CAST(vaddr);
- }
-
- i915_gem_object_unpin_map(h->obj);
- i915_gem_object_put(h->obj);
-
- h->obj = obj;
- h->batch = vaddr;
- }
-
- rq = i915_request_alloc(engine, h->ctx);
- if (IS_ERR(rq))
- return rq;
-
- err = emit_recurse_batch(h, rq);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
+ return err ? ERR_PTR(err) : rq;
}
static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
@@ -386,6 +385,31 @@ static int igt_global_reset(void *arg)
return err;
}
+static int igt_wedged_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ intel_wakeref_t wakeref;
+
+ /* Check that we can recover a wedged device with a GPU reset */
+
+ igt_global_reset_lock(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ i915_gem_set_wedged(i915);
+ GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+
+ set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
+ i915_reset(i915, ALL_ENGINES, NULL);
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
+
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ igt_global_reset_unlock(i915);
+
+ return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+}
+
static bool wait_for_idle(struct intel_engine_cs *engine)
{
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
@@ -1449,10 +1473,183 @@ err_unlock:
return err;
}
+static void __preempt_begin(void)
+{
+ preempt_disable();
+}
+
+static void __preempt_end(void)
+{
+ preempt_enable();
+}
+
+static void __softirq_begin(void)
+{
+ local_bh_disable();
+}
+
+static void __softirq_end(void)
+{
+ local_bh_enable();
+}
+
+static void __hardirq_begin(void)
+{
+ local_irq_disable();
+}
+
+static void __hardirq_end(void)
+{
+ local_irq_enable();
+}
+
+struct atomic_section {
+ const char *name;
+ void (*critical_section_begin)(void);
+ void (*critical_section_end)(void);
+};
+
+static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
+ const struct atomic_section *p,
+ const char *mode)
+{
+ struct tasklet_struct * const t = &engine->execlists.tasklet;
+ int err;
+
+ GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
+ engine->name, mode, p->name);
+
+ tasklet_disable_nosync(t);
+ p->critical_section_begin();
+
+ err = i915_reset_engine(engine, NULL);
+
+ p->critical_section_end();
+ tasklet_enable(t);
+
+ if (err)
+ pr_err("i915_reset_engine(%s:%s) failed under %s\n",
+ engine->name, mode, p->name);
+
+ return err;
+}
+
+static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
+ const struct atomic_section *p)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct i915_request *rq;
+ struct hang h;
+ int err;
+
+ err = __igt_atomic_reset_engine(engine, p, "idle");
+ if (err)
+ return err;
+
+ err = hang_init(&h, i915);
+ if (err)
+ return err;
+
+ rq = hang_create_request(&h, engine);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (wait_until_running(&h, rq)) {
+ err = __igt_atomic_reset_engine(engine, p, "active");
+ } else {
+ pr_err("%s(%s): Failed to start request %llx, at %x\n",
+ __func__, engine->name,
+ rq->fence.seqno, hws_seqno(&h, rq));
+ err = -EIO;
+ }
+
+ if (err == 0) {
+ struct igt_wedge_me w;
+
+ igt_wedge_on_timeout(&w, i915, HZ / 20 /* 50ms timeout */)
+ i915_request_wait(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ err = -EIO;
+ }
+
+ i915_request_put(rq);
+out:
+ hang_fini(&h);
+ return err;
+}
+
+static void force_reset(struct drm_i915_private *i915)
+{
+ i915_gem_set_wedged(i915);
+ set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
+ i915_reset(i915, 0, NULL);
+}
+
+static int igt_atomic_reset(void *arg)
+{
+ static const struct atomic_section phases[] = {
+ { "preempt", __preempt_begin, __preempt_end },
+ { "softirq", __softirq_begin, __softirq_end },
+ { "hardirq", __hardirq_begin, __hardirq_end },
+ { }
+ };
+ struct drm_i915_private *i915 = arg;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /* Check that the resets are usable from atomic context */
+
+ if (USES_GUC_SUBMISSION(i915))
+ return 0; /* guc is dead; long live the guc */
+
+ igt_global_reset_lock(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ /* Flush any requests before we get started and check basics */
+ force_reset(i915);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ goto unlock;
+
+ if (intel_has_reset_engine(i915)) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ const typeof(*phases) *p;
+
+ for (p = phases; p->name; p++) {
+ err = igt_atomic_reset_engine(engine, p);
+ if (err)
+ goto out;
+ }
+ }
+ }
+
+out:
+ /* As we poke around the guts, do a full reset before continuing. */
+ force_reset(i915);
+
+unlock:
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ igt_global_reset_unlock(i915);
+
+ return err;
+}
+
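The phases[] table above makes the atomic coverage easy to extend: any pair of void (*)(void) callbacks that open and close a critical section can be added as another row. As an illustration only (not part of this patch), an RCU read-side phase would slot in as follows, since rcu_read_lock()/rcu_read_unlock() from <linux/rcupdate.h> already have the required signatures:

	/* Hypothetical extra phase: engine reset inside an RCU read-side section */
	static void __rcu_begin(void)
	{
		rcu_read_lock();
	}

	static void __rcu_end(void)
	{
		rcu_read_unlock();
	}

	/* ...and the matching entry in phases[]: */
		{ "rcu", __rcu_begin, __rcu_end },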
int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
+ SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
@@ -1463,7 +1660,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_reset_evict_ppgtt),
SUBTEST(igt_reset_evict_fence),
SUBTEST(igt_handle_error),
+ SUBTEST(igt_atomic_reset),
};
+ intel_wakeref_t wakeref;
bool saved_hangcheck;
int err;
@@ -1473,7 +1672,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (i915_terminally_wedged(&i915->gpu_error))
return -EIO; /* we're long past hope of a successful reset */
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
err = i915_subtests(tests, i915);
@@ -1483,7 +1682,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
i915_modparams.enable_hangcheck = saved_hangcheck;
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
return err;
}
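A recurring change in this patch is that intel_runtime_pm_get() now returns an intel_wakeref_t cookie which must be handed back to intel_runtime_pm_put(), making unbalanced references easier to spot. A minimal sketch of both forms used by the converted selftests (with_intel_runtime_pm() appears in the intel_workarounds.c hunks below):

	intel_wakeref_t wakeref;

	wakeref = intel_runtime_pm_get(i915);	/* take a tracked wakeref */
	/* ... touch the hardware ... */
	intel_runtime_pm_put(i915, wakeref);	/* release the same cookie */

	/* or scoped to a block: */
	with_intel_runtime_pm(i915, wakeref) {
		/* hardware access limited to this block */
	}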
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index ca461e3a5f27..2b2ecd76c2ac 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -4,6 +4,8 @@
* Copyright © 2018 Intel Corporation
*/
+#include "../i915_reset.h"
+
#include "../i915_selftest.h"
#include "igt_flush_test.h"
#include "igt_spinner.h"
@@ -18,13 +20,14 @@ static int live_sanitycheck(void *arg)
struct i915_gem_context *ctx;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin, i915))
goto err_unlock;
@@ -65,7 +68,7 @@ err_spin:
igt_spinner_fini(&spin);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -77,13 +80,14 @@ static int live_preempt(void *arg)
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -158,7 +162,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -171,13 +175,14 @@ static int live_late_preempt(void *arg)
struct intel_engine_cs *engine;
struct i915_sched_attr attr = {};
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -251,7 +256,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
@@ -270,6 +275,7 @@ static int live_preempt_hang(void *arg)
struct igt_spinner spin_hi, spin_lo;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
@@ -279,7 +285,7 @@ static int live_preempt_hang(void *arg)
return 0;
mutex_lock(&i915->drm.struct_mutex);
- intel_runtime_pm_get(i915);
+ wakeref = intel_runtime_pm_get(i915);
if (igt_spinner_init(&spin_hi, i915))
goto err_unlock;
@@ -374,7 +380,7 @@ err_spin_hi:
igt_spinner_fini(&spin_hi);
err_unlock:
igt_flush_test(i915, I915_WAIT_LOCKED);
- intel_runtime_pm_put(i915);
+ intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
return err;
}
@@ -522,7 +528,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
@@ -550,7 +556,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
return 0;
}
@@ -562,6 +568,7 @@ static int live_preempt_smoke(void *arg)
.ncontext = 1024,
};
const unsigned int phase[] = { 0, BATCH };
+ intel_wakeref_t wakeref;
int err = -ENOMEM;
u32 *cs;
int n;
@@ -576,7 +583,7 @@ static int live_preempt_smoke(void *arg)
return -ENOMEM;
mutex_lock(&smoke.i915->drm.struct_mutex);
- intel_runtime_pm_get(smoke.i915);
+ wakeref = intel_runtime_pm_get(smoke.i915);
smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
if (IS_ERR(smoke.batch)) {
@@ -627,7 +634,7 @@ err_ctx:
err_batch:
i915_gem_object_put(smoke.batch);
err_unlock:
- intel_runtime_pm_put(smoke.i915);
+ intel_runtime_pm_put(smoke.i915, wakeref);
mutex_unlock(&smoke.i915->drm.struct_mutex);
kfree(smoke.contexts);
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index 67017d5175b8..a8cac56be835 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -5,6 +5,7 @@
*/
#include "../i915_selftest.h"
+#include "../i915_reset.h"
#include "igt_flush_test.h"
#include "igt_reset.h"
@@ -12,13 +13,59 @@
#include "igt_wedge_me.h"
#include "mock_context.h"
+#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
+struct wa_lists {
+ struct i915_wa_list gt_wa_list;
+ struct {
+ char name[REF_NAME_MAX];
+ struct i915_wa_list wa_list;
+ } engine[I915_NUM_ENGINES];
+};
+
+static void
+reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ memset(lists, 0, sizeof(*lists));
+
+ wa_init_start(&lists->gt_wa_list, "GT_REF");
+ gt_init_workarounds(i915, &lists->gt_wa_list);
+ wa_init_finish(&lists->gt_wa_list);
+
+ for_each_engine(engine, i915, id) {
+ struct i915_wa_list *wal = &lists->engine[id].wa_list;
+ char *name = lists->engine[id].name;
+
+ snprintf(name, REF_NAME_MAX, "%s_REF", engine->name);
+
+ wa_init_start(wal, name);
+ engine_init_workarounds(engine, wal);
+ wa_init_finish(wal);
+ }
+}
+
+static void
+reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ intel_wa_list_free(&lists->engine[id].wa_list);
+
+ intel_wa_list_free(&lists->gt_wa_list);
+}
+
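reference_lists_init()/reference_lists_fini() capture the expected GT and per-engine workaround lists once, so each reset test can compare the hardware against the same reference before and after the reset. A minimal sketch of the intended flow, assuming only the helpers defined in this file:

	struct wa_lists lists;
	bool ok;

	reference_lists_init(i915, &lists);			/* snapshot expected WAs */
	ok = wa_list_verify(i915, &lists.gt_wa_list, "before");	/* compare HW state */
	/* ... trigger the reset under test ... */
	ok &= wa_list_verify(i915, &lists.gt_wa_list, "after");
	reference_lists_fini(i915, &lists);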
static struct drm_i915_gem_object *
read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
{
+ const u32 base = engine->mmio_base;
struct drm_i915_gem_object *result;
+ intel_wakeref_t wakeref;
struct i915_request *rq;
struct i915_vma *vma;
- const u32 base = engine->mmio_base;
u32 srm, *cs;
int err;
int i;
@@ -47,9 +94,9 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (err)
goto err_obj;
- intel_runtime_pm_get(engine->i915);
- rq = i915_request_alloc(engine, ctx);
- intel_runtime_pm_put(engine->i915);
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = i915_request_alloc(engine, ctx);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_pin;
@@ -183,20 +230,22 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
{
struct i915_gem_context *ctx;
struct i915_request *rq;
+ intel_wakeref_t wakeref;
int err = 0;
ctx = kernel_context(engine->i915);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- intel_runtime_pm_get(engine->i915);
-
- if (spin)
- rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
- else
- rq = i915_request_alloc(engine, ctx);
-
- intel_runtime_pm_put(engine->i915);
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(engine->i915, wakeref) {
+ if (spin)
+ rq = igt_spinner_create_request(spin,
+ ctx, engine,
+ MI_NOOP);
+ else
+ rq = i915_request_alloc(engine, ctx);
+ }
kernel_context_close(ctx);
@@ -228,6 +277,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err;
pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
@@ -253,9 +303,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
if (err)
goto out;
- intel_runtime_pm_get(i915);
- err = reset(engine);
- intel_runtime_pm_put(i915);
+ with_intel_runtime_pm(i915, wakeref)
+ err = reset(engine);
if (want_spin) {
igt_spinner_end(&spin);
@@ -326,16 +375,17 @@ out:
return err;
}
-static bool verify_gt_engine_wa(struct drm_i915_private *i915, const char *str)
+static bool verify_gt_engine_wa(struct drm_i915_private *i915,
+ struct wa_lists *lists, const char *str)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
bool ok = true;
- ok &= intel_gt_verify_workarounds(i915, str);
+ ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
for_each_engine(engine, i915, id)
- ok &= intel_engine_verify_workarounds(engine, str);
+ ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
return ok;
}
@@ -345,6 +395,8 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
{
struct drm_i915_private *i915 = arg;
struct i915_gpu_error *error = &i915->gpu_error;
+ intel_wakeref_t wakeref;
+ struct wa_lists lists;
bool ok;
if (!intel_has_gpu_reset(i915))
@@ -353,19 +405,22 @@ live_gpu_reset_gt_engine_workarounds(void *arg)
pr_info("Verifying after GPU reset...\n");
igt_global_reset_lock(i915);
+ wakeref = intel_runtime_pm_get(i915);
- ok = verify_gt_engine_wa(i915, "before reset");
+ reference_lists_init(i915, &lists);
+
+ ok = verify_gt_engine_wa(i915, &lists, "before reset");
if (!ok)
goto out;
- intel_runtime_pm_get(i915);
set_bit(I915_RESET_HANDOFF, &error->flags);
i915_reset(i915, ALL_ENGINES, "live_workarounds");
- intel_runtime_pm_put(i915);
- ok = verify_gt_engine_wa(i915, "after reset");
+ ok = verify_gt_engine_wa(i915, &lists, "after reset");
out:
+ reference_lists_fini(i915, &lists);
+ intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
return ok ? 0 : -ESRCH;
@@ -380,6 +435,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
struct igt_spinner spin;
enum intel_engine_id id;
struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct wa_lists lists;
int ret = 0;
if (!intel_has_reset_engine(i915))
@@ -390,23 +447,24 @@ live_engine_reset_gt_engine_workarounds(void *arg)
return PTR_ERR(ctx);
igt_global_reset_lock(i915);
+ wakeref = intel_runtime_pm_get(i915);
+
+ reference_lists_init(i915, &lists);
for_each_engine(engine, i915, id) {
bool ok;
pr_info("Verifying after %s reset...\n", engine->name);
- ok = verify_gt_engine_wa(i915, "before reset");
+ ok = verify_gt_engine_wa(i915, &lists, "before reset");
if (!ok) {
ret = -ESRCH;
goto err;
}
- intel_runtime_pm_get(i915);
i915_reset_engine(engine, "live_workarounds");
- intel_runtime_pm_put(i915);
- ok = verify_gt_engine_wa(i915, "after idle reset");
+ ok = verify_gt_engine_wa(i915, &lists, "after idle reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -416,13 +474,10 @@ live_engine_reset_gt_engine_workarounds(void *arg)
if (ret)
goto err;
- intel_runtime_pm_get(i915);
-
rq = igt_spinner_create_request(&spin, ctx, engine, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
igt_spinner_fini(&spin);
- intel_runtime_pm_put(i915);
goto err;
}
@@ -431,19 +486,16 @@ live_engine_reset_gt_engine_workarounds(void *arg)
if (!igt_wait_for_spinner(&spin, rq)) {
pr_err("Spinner failed to start\n");
igt_spinner_fini(&spin);
- intel_runtime_pm_put(i915);
ret = -ETIMEDOUT;
goto err;
}
i915_reset_engine(engine, "live_workarounds");
- intel_runtime_pm_put(i915);
-
igt_spinner_end(&spin);
igt_spinner_fini(&spin);
- ok = verify_gt_engine_wa(i915, "after busy reset");
+ ok = verify_gt_engine_wa(i915, &lists, "after busy reset");
if (!ok) {
ret = -ESRCH;
goto err;
@@ -451,6 +503,8 @@ live_engine_reset_gt_engine_workarounds(void *arg)
}
err:
+ reference_lists_fini(i915, &lists);
+ intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
kernel_context_close(ctx);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index d937bdff26f9..b646cdcdd602 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -45,11 +45,8 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
-
- ce->gem_context = ctx;
- }
+ for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
+ intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index d0c44c18db42..442ec2aeec81 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -30,6 +30,36 @@ struct mock_ring {
struct i915_timeline timeline;
};
+static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+{
+ const unsigned long sz = PAGE_SIZE / 2;
+ struct mock_ring *ring;
+
+ ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ i915_timeline_init(engine->i915, &ring->timeline, engine->name);
+
+ ring->base.size = sz;
+ ring->base.effective_size = sz;
+ ring->base.vaddr = (void *)(ring + 1);
+ ring->base.timeline = &ring->timeline;
+
+ INIT_LIST_HEAD(&ring->base.request_list);
+ intel_ring_update_space(&ring->base);
+
+ return &ring->base;
+}
+
+static void mock_ring_free(struct intel_ring *base)
+{
+ struct mock_ring *ring = container_of(base, typeof(*ring), base);
+
+ i915_timeline_fini(&ring->timeline);
+ kfree(ring);
+}
+
static struct mock_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
@@ -37,11 +67,10 @@ static struct mock_request *first_request(struct mock_engine *engine)
link);
}
-static void advance(struct mock_engine *engine,
- struct mock_request *request)
+static void advance(struct mock_request *request)
{
list_del_init(&request->link);
- mock_seqno_advance(&engine->base, request->base.global_seqno);
+ mock_seqno_advance(request->base.engine, request->base.global_seqno);
}
static void hw_delay_complete(struct timer_list *t)
@@ -54,7 +83,7 @@ static void hw_delay_complete(struct timer_list *t)
/* Timer fired, first request is complete */
request = first_request(engine);
if (request)
- advance(engine, request);
+ advance(request);
/*
* Also immediately signal any subsequent 0-delay requests, but
@@ -66,7 +95,7 @@ static void hw_delay_complete(struct timer_list *t)
break;
}
- advance(engine, request);
+ advance(request);
}
spin_unlock(&engine->hw_lock);
@@ -80,6 +109,9 @@ static void mock_context_unpin(struct intel_context *ce)
static void mock_context_destroy(struct intel_context *ce)
{
GEM_BUG_ON(ce->pin_count);
+
+ if (ce->ring)
+ mock_ring_free(ce->ring);
}
static const struct intel_context_ops mock_context_ops = {
@@ -93,13 +125,22 @@ mock_context_pin(struct intel_engine_cs *engine,
{
struct intel_context *ce = to_intel_context(ctx, engine);
- if (!ce->pin_count++) {
- i915_gem_context_get(ctx);
- ce->ring = engine->buffer;
- ce->ops = &mock_context_ops;
+ if (ce->pin_count++)
+ return ce;
+
+ if (!ce->ring) {
+ ce->ring = mock_ring(engine);
+ if (!ce->ring)
+ goto err;
}
+ ce->ops = &mock_context_ops;
+ i915_gem_context_get(ctx);
return ce;
+
+err:
+ ce->pin_count = 0;
+ return ERR_PTR(-ENOMEM);
}
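With this change the mock ring is owned by the context rather than the engine: it is allocated on first pin (above) and released from mock_context_destroy() instead of hanging off engine->buffer. A rough sketch of the resulting lifetime, reusing only calls visible in this patch:

	ce = intel_context_pin(ctx, engine);	/* first pin -> mock_ring() allocates ce->ring */
	if (IS_ERR(ce))
		return PTR_ERR(ce);
	/* ... queue mock requests against ce->ring ... */
	__intel_context_unpin(ctx, engine);	/* drop the pin */
	/* final context release -> mock_context_destroy() -> mock_ring_free(ce->ring) */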
static int mock_request_alloc(struct i915_request *request)
@@ -138,43 +179,11 @@ static void mock_submit_request(struct i915_request *request)
if (mock->delay)
mod_timer(&engine->hw_delay, jiffies + mock->delay);
else
- advance(engine, mock);
+ advance(mock);
}
spin_unlock_irq(&engine->hw_lock);
}
-static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
-{
- const unsigned long sz = PAGE_SIZE / 2;
- struct mock_ring *ring;
-
- BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
-
- ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
- if (!ring)
- return NULL;
-
- i915_timeline_init(engine->i915, &ring->timeline, engine->name);
-
- ring->base.size = sz;
- ring->base.effective_size = sz;
- ring->base.vaddr = (void *)(ring + 1);
- ring->base.timeline = &ring->timeline;
-
- INIT_LIST_HEAD(&ring->base.request_list);
- intel_ring_update_space(&ring->base);
-
- return &ring->base;
-}
-
-static void mock_ring_free(struct intel_ring *base)
-{
- struct mock_ring *ring = container_of(base, typeof(*ring), base);
-
- i915_timeline_fini(&ring->timeline);
- kfree(ring);
-}
-
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -203,24 +212,17 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
intel_engine_init_breadcrumbs(&engine->base);
- engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
/* fake hw queue */
spin_lock_init(&engine->hw_lock);
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- engine->base.buffer = mock_ring(&engine->base);
- if (!engine->base.buffer)
- goto err_breadcrumbs;
-
if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
- goto err_ring;
+ goto err_breadcrumbs;
return &engine->base;
-err_ring:
- mock_ring_free(engine->base.buffer);
err_breadcrumbs:
intel_engine_fini_breadcrumbs(&engine->base);
i915_timeline_fini(&engine->base.timeline);
@@ -237,10 +239,8 @@ void mock_engine_flush(struct intel_engine_cs *engine)
del_timer_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
- list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
- list_del_init(&request->link);
- mock_seqno_advance(&mock->base, request->base.global_seqno);
- }
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+ advance(request);
spin_unlock_irq(&mock->hw_lock);
}
@@ -263,8 +263,6 @@ void mock_engine_free(struct intel_engine_cs *engine)
__intel_context_unpin(engine->i915->kernel_context, engine);
- mock_ring_free(engine->buffer);
-
intel_engine_fini_breadcrumbs(engine);
i915_timeline_fini(&engine->timeline);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 43ed8b28aeaa..5477ad4a7e7d 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -72,7 +72,7 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_drain_freed_objects(i915);
mutex_lock(&i915->drm.struct_mutex);
- mock_fini_ggtt(i915);
+ mock_fini_ggtt(&i915->ggtt);
mutex_unlock(&i915->drm.struct_mutex);
WARN_ON(!list_empty(&i915->gt.timelines));
@@ -147,22 +147,24 @@ struct drm_i915_private *mock_gem_device(void)
pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
pdev->dev.release = release_dev;
dev_set_name(&pdev->dev, "mock");
- dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU)
/* hack to disable iommu for the fake device; force identity mapping */
pdev->dev.archdata.iommu = (void *)-1;
#endif
+ i915 = (struct drm_i915_private *)(pdev + 1);
+ pci_set_drvdata(pdev, i915);
+
+ intel_runtime_pm_init_early(i915);
+
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (pm_runtime_enabled(&pdev->dev))
WARN_ON(pm_runtime_get_sync(&pdev->dev));
- i915 = (struct drm_i915_private *)(pdev + 1);
- pci_set_drvdata(pdev, i915);
-
err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
if (err) {
pr_err("Failed to initialise mock GEM device: err=%d\n", err);
@@ -186,6 +188,7 @@ struct drm_i915_private *mock_gem_device(void)
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
+ mutex_init(&i915->gpu_error.wedge_mutex);
i915->wq = alloc_ordered_workqueue("mock", 0);
if (!i915->wq)
@@ -229,7 +232,7 @@ struct drm_i915_private *mock_gem_device(void)
mutex_lock(&i915->drm.struct_mutex);
- mock_init_ggtt(i915);
+ mock_init_ggtt(i915, &i915->ggtt);
mkwrite_device_info(i915)->ring_mask = BIT(0);
i915->kernel_context = mock_context(i915, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 6ae418c76015..cd83929fde8e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -70,7 +70,7 @@ mock_ppgtt(struct drm_i915_private *i915,
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->vm.file = ERR_PTR(-ENODEV);
- i915_address_space_init(&ppgtt->vm, i915);
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
ppgtt->vm.clear_range = nop_clear_range;
ppgtt->vm.insert_page = mock_insert_page;
@@ -97,11 +97,12 @@ static void mock_unbind_ggtt(struct i915_vma *vma)
{
}
-void mock_init_ggtt(struct drm_i915_private *i915)
+void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
+ memset(ggtt, 0, sizeof(*ggtt));
ggtt->vm.i915 = i915;
+ ggtt->vm.is_ggtt = true;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
@@ -117,14 +118,10 @@ void mock_init_ggtt(struct drm_i915_private *i915)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
- i915_address_space_init(&ggtt->vm, i915);
-
- ggtt->vm.is_ggtt = true;
+ i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
}
-void mock_fini_ggtt(struct drm_i915_private *i915)
+void mock_fini_ggtt(struct i915_ggtt *ggtt)
{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
i915_address_space_fini(&ggtt->vm);
}
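The mock GGTT helpers now take the ggtt explicitly instead of reaching into i915->ggtt, which mirrors the mock_gem_device.c hunk above. A short sketch of the updated calling convention:

	mock_init_ggtt(i915, &i915->ggtt);
	/* ... run the selftest ... */
	mock_fini_ggtt(&i915->ggtt);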
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
index 9a0a833bb545..40d544bde1d5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.h
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -25,8 +25,8 @@
#ifndef __MOCK_GTT_H
#define __MOCK_GTT_H
-void mock_init_ggtt(struct drm_i915_private *i915);
-void mock_fini_ggtt(struct drm_i915_private *i915);
+void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt);
+void mock_fini_ggtt(struct i915_ggtt *ggtt);
struct i915_hw_ppgtt *
mock_ppgtt(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 9fc8085f76dc..4d47910e5184 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -23,7 +23,6 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
-#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -290,6 +289,11 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
+ if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB888)
+ pipe_config->pipe_bpp = 24;
+ else
+ pipe_config->pipe_bpp = 18;
+
if (IS_GEN9_LP(dev_priv)) {
/* Enable Frame time stamp based scanline reporting */
adjusted_mode->private_flags |=
@@ -674,6 +678,10 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
LANE_CONFIGURATION_DUAL_LINK_B :
LANE_CONFIGURATION_DUAL_LINK_A;
}
+
+ if (intel_dsi->pixel_format != MIPI_DSI_FMT_RGB888)
+ temp |= DITHERING_ENABLE;
+
/* assert ip_tg_enable signal */
I915_WRITE(port_ctrl, temp | DPI_ENABLE);
POSTING_READ(port_ctrl);
@@ -960,13 +968,15 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ intel_wakeref_t wakeref;
enum port port;
bool active = false;
DRM_DEBUG_KMS("\n");
- if (!intel_display_power_get_if_enabled(dev_priv,
- encoder->power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain);
+ if (!wakeref)
return false;
/*
@@ -1022,7 +1032,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
out_put_power:
- intel_display_power_put(dev_priv, encoder->power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
return active;
}
@@ -1058,10 +1068,8 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
- pipe_config->pipe_bpp =
- mipi_dsi_pixel_format_to_bpp(
- pixel_format_from_register_bits(fmt));
- bpp = pipe_config->pipe_bpp;
+ bpp = mipi_dsi_pixel_format_to_bpp(
+ pixel_format_from_register_bits(fmt));
/* Enable Frame time stamp based scanline reporting */
adjusted_mode->private_flags |=
@@ -1199,11 +1207,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
if (IS_GEN9_LP(dev_priv)) {
bxt_dsi_get_pipe_config(encoder, pipe_config);
- pclk = bxt_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
- pipe_config);
+ pclk = bxt_dsi_get_pclk(encoder, pipe_config);
} else {
- pclk = vlv_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
- pipe_config);
+ pclk = vlv_dsi_get_pclk(encoder, pipe_config);
}
if (pclk) {
@@ -1575,6 +1581,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
enum drm_panel_orientation orientation;
struct intel_plane *plane;
struct intel_crtc *crtc;
+ intel_wakeref_t wakeref;
enum pipe pipe;
u32 val;
@@ -1585,7 +1592,8 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
plane = to_intel_plane(crtc->base.primary);
power_domain = POWER_DOMAIN_PIPE(pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wakeref)
return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
val = I915_READ(DSPCNTR(plane->i9xx_plane));
@@ -1597,7 +1605,7 @@ vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
else
orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, power_domain, wakeref);
return orientation;
}
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index a132a8037ecc..954d5a8c4fa7 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -252,20 +252,12 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
-static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
-{
- int bpp = mipi_dsi_pixel_format_to_bpp(fmt);
-
- WARN(bpp != pipe_bpp,
- "bpp match assertion failure (expected %d, current %d)\n",
- bpp, pipe_bpp);
-}
-
-u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
u32 dsi_clock, pclk;
u32 pll_ctl, pll_div;
u32 m = 0, p = 0, n;
@@ -319,15 +311,12 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
dsi_clock = (m * refclk) / (p * n);
- /* pixel_format and pipe_bpp should agree */
- assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
-
- pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, pipe_bpp);
+ pclk = DIV_ROUND_CLOSEST(dsi_clock * intel_dsi->lane_count, bpp);
return pclk;
}
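With pipe_bpp no longer passed in, the divisor comes straight from the panel's pixel format. As a worked example with illustrative numbers only (not taken from real hardware): for a DSI bit clock of 500000 kHz, 4 lanes and RGB888,

	bpp  = mipi_dsi_pixel_format_to_bpp(MIPI_DSI_FMT_RGB888);	/* 24 */
	pclk = DIV_ROUND_CLOSEST(500000 * 4, bpp);			/* ~83333 kHz */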
-u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
+u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
u32 pclk;
@@ -335,12 +324,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
u32 dsi_ratio;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- /* Divide by zero */
- if (!pipe_bpp) {
- DRM_ERROR("Invalid BPP(0)\n");
- return 0;
- }
+ int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
@@ -348,10 +332,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2;
- /* pixel_format and pipe_bpp should agree */
- assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp);
-
- pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp);
+ pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
return pclk;