author	Zhao Yan <yan.y.zhao@intel.com>	2018-08-01 00:15:31 -0400
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2018-08-02 13:27:54 +0800
commit	8bfa02c885ee538353f52b36d16de90655e1bcde (patch)
tree	6f2436d8f2acf3a5d8ec13ff8fb08fd072d0458e /drivers/gpu/drm/i915/gvt/scheduler.c
parent	db47685da1d8224f0eaa35c32e1a1ea97b05bae0 (diff)
drm/i915/gvt: only copy the first page for restore inhibit context
If a context is a restore inhibit context, the gfx hardware loads only the first page of the ring context, so we need to copy only that one page from the guest as well.

v3: use "return" instead of "goto" for the inhibit case. (Zhenyu Wang)
v2: move the restore inhibit check into a macro in mmio_context.h

Signed-off-by: Zhao Yan <yan.y.zhao@intel.com>
Acked-by: Hang Yuan <hang.yuan@linux.intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
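For reference, a minimal sketch of the restore inhibit check that v2 moves into mmio_context.h, assuming the masked-bit encoding i915 uses for the context control register (the macro bodies below are illustrative, not quoted from the patch):

	/* Sketch of the check used by the second hunk below. The context
	 * control register is masked: bits 16-31 of a write select which of
	 * bits 0-15 take effect, so restore inhibit only holds when both
	 * the mask bit and the value bit are set.
	 */
	#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))

	#define IS_RESTORE_INHIBIT(a)	\
		(_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
		 ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))

With the check in this form, populate_shadow_context() can return right after copying the LRC state page, which is exactly what the second hunk below does.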
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	60
1 file changed, 31 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 928818f218f7..049c0ea324fa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -132,35 +132,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	unsigned long context_gpa, context_page_num;
 	int i;
 
-	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-			workload->ctx_desc.lrca);
-
-	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
-
-	context_page_num = context_page_num >> PAGE_SHIFT;
-
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
-		context_page_num = 19;
-
-	i = 2;
-
-	while (i < context_page_num) {
-		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-				(u32)((workload->ctx_desc.lrca + i) <<
-				I915_GTT_PAGE_SHIFT));
-		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
-			gvt_vgpu_err("Invalid guest context descriptor\n");
-			return -EFAULT;
-		}
-
-		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
-		dst = kmap(page);
-		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-				I915_GTT_PAGE_SIZE);
-		kunmap(page);
-		i++;
-	}
-
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
@@ -195,6 +166,37 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
+
+	if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
+		return 0;
+
+	gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
+			workload->ctx_desc.lrca);
+
+	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+
+	context_page_num = context_page_num >> PAGE_SHIFT;
+
+	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+		context_page_num = 19;
+
+	i = 2;
+	while (i < context_page_num) {
+		context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+				(u32)((workload->ctx_desc.lrca + i) <<
+				I915_GTT_PAGE_SHIFT));
+		if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+			gvt_vgpu_err("Invalid guest context descriptor\n");
+			return -EFAULT;
+		}
+
+		page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
+		dst = kmap(page);
+		intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
+				I915_GTT_PAGE_SIZE);
+		kunmap(page);
+		i++;
+	}
 	return 0;
 }
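A note on the address math in the copy loop: workload->ctx_desc.lrca is a GGTT page index, so page i of the guest context image lives at (lrca + i) << I915_GTT_PAGE_SHIFT in guest graphics memory, which intel_vgpu_gma_to_gpa() then translates to a guest physical address. A hypothetical helper capturing just that step (guest_context_page_gma is not a name from the patch; I915_GTT_PAGE_SHIFT is 12 for 4 KiB GTT pages):

	/* Guest graphics memory address of page page_idx of a context image
	 * whose LRCA (a GGTT page index) is lrca; mirrors the expression
	 * passed to intel_vgpu_gma_to_gpa() in the loop above.
	 */
	static inline u32 guest_context_page_gma(u32 lrca, int page_idx)
	{
		return (u32)((lrca + page_idx) << I915_GTT_PAGE_SHIFT);
	}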