Diffstat (limited to 'drivers/gpu/drm/i915/gvt/scheduler.c')
-rw-r--r-- | drivers/gpu/drm/i915/gvt/scheduler.c | 61
1 file changed, 61 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index ea34003d6dd2..1ad8c5e1455d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -334,6 +334,28 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	i915_gem_object_put(wa_ctx->indirect_ctx.obj);
 }
 
+static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+					 struct i915_gem_context *ctx)
+{
+	struct intel_vgpu_mm *mm = workload->shadow_mm;
+	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+	int i = 0;
+
+	if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
+		return -1;
+
+	if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+		px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
+	} else {
+		for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+			px_dma(ppgtt->pdp.page_directory[i]) =
+				mm->ppgtt_mm.shadow_pdps[i];
+		}
+	}
+
+	return 0;
+}
+
 /**
  * intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
  * shadow it as well, include ringbuffer,wa_ctx and ctx.
@@ -358,6 +380,12 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	if (workload->req)
 		return 0;
 
+	ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+	if (ret < 0) {
+		gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+		return ret;
+	}
+
 	/* pin shadow context by gvt even the shadow context will be pinned
 	 * when i915 alloc request. That is because gvt will update the guest
 	 * context from shadow context when workload is completed, and at that
@@ -1051,6 +1079,21 @@ err:
 	return ret;
 }
 
+static void
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
+{
+	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+	int i;
+
+	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+		px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
+	else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++)
+			px_dma(i915_ppgtt->pdp.page_directory[i]) =
+				s->i915_context_pdps[i];
+	}
+}
+
 /**
  * intel_vgpu_clean_submission - free submission-related resource for vGPU
  * @vgpu: a vGPU
@@ -1063,6 +1106,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 
 	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+	i915_context_ppgtt_root_restore(s);
 	i915_gem_context_put(s->shadow_ctx);
 	kmem_cache_destroy(s->workloads);
 }
@@ -1088,6 +1132,21 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
 	s->ops->reset(vgpu, engine_mask);
 }
 
+static void
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
+{
+	struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
+	int i;
+
+	if (i915_vm_is_48bit(&i915_ppgtt->vm))
+		s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
+	else {
+		for (i = 0; i < GEN8_3LVL_PDPES; i++)
+			s->i915_context_pdps[i] =
+				px_dma(i915_ppgtt->pdp.page_directory[i]);
+	}
+}
+
 /**
  * intel_vgpu_setup_submission - setup submission-related resource for vGPU
  * @vgpu: a vGPU
@@ -1110,6 +1169,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 	if (IS_ERR(s->shadow_ctx))
 		return PTR_ERR(s->shadow_ctx);
 
+	i915_context_ppgtt_root_save(s);
+
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
 	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
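
The hunks above follow a save/override/restore pattern on the shadow context's PPGTT root entries: intel_vgpu_setup_submission() saves the host-allocated roots, intel_gvt_scan_and_shadow_workload() overrides them with the vGPU's shadowed roots via set_context_ppgtt_from_shadow(), and intel_vgpu_clean_submission() puts the saved values back. The standalone C sketch below only models that pattern in userspace to make the lifetime of the root pointers easier to follow; every name in it (fake_ppgtt, fake_submission, root_save, NR_PDPS, and so on) is an illustrative stand-in, not part of the i915/GVT code.

/*
 * Hypothetical userspace sketch (not kernel code) of the save/override/restore
 * pattern added by this patch. All names below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_PDPS 4	/* stands in for GEN8_3LVL_PDPES / GVT_RING_CTX_NR_PDPS */

struct fake_ppgtt {
	int use_4lvl;			/* 1: single PML4 root, 0: four PDP roots */
	uint64_t pml4_dma;		/* stands in for px_dma(&ppgtt->pml4) */
	uint64_t pdp_dma[NR_PDPS];	/* stands in for px_dma(pdp.page_directory[i]) */
};

struct fake_submission {
	struct fake_ppgtt *ctx_ppgtt;	/* ppgtt behind the shadow context */
	uint64_t saved_pml4;		/* ~ s->i915_context_pml4 */
	uint64_t saved_pdps[NR_PDPS];	/* ~ s->i915_context_pdps[] */
};

/* ~ i915_context_ppgtt_root_save(): remember the host-allocated roots. */
static void root_save(struct fake_submission *s)
{
	struct fake_ppgtt *p = s->ctx_ppgtt;

	if (p->use_4lvl)
		s->saved_pml4 = p->pml4_dma;
	else
		memcpy(s->saved_pdps, p->pdp_dma, sizeof(s->saved_pdps));
}

/* ~ set_context_ppgtt_from_shadow(): point the context at the shadow roots. */
static int root_set_from_shadow(struct fake_ppgtt *p,
				const uint64_t *shadow_pdps, int shadowed)
{
	int i;

	if (!shadowed)
		return -1;	/* shadow page tables are not ready yet */

	if (p->use_4lvl)
		p->pml4_dma = shadow_pdps[0];
	else
		for (i = 0; i < NR_PDPS; i++)
			p->pdp_dma[i] = shadow_pdps[i];

	return 0;
}

/* ~ i915_context_ppgtt_root_restore(): put the original roots back. */
static void root_restore(struct fake_submission *s)
{
	struct fake_ppgtt *p = s->ctx_ppgtt;

	if (p->use_4lvl)
		p->pml4_dma = s->saved_pml4;
	else
		memcpy(p->pdp_dma, s->saved_pdps, sizeof(p->pdp_dma));
}

int main(void)
{
	struct fake_ppgtt ppgtt = { .use_4lvl = 1, .pml4_dma = 0x1000 };
	struct fake_submission s = { .ctx_ppgtt = &ppgtt };
	uint64_t shadow_pdps[NR_PDPS] = { 0x2000 };

	root_save(&s);					/* setup_submission */
	root_set_from_shadow(&ppgtt, shadow_pdps, 1);	/* scan_and_shadow_workload */
	printf("root while workload runs: %#lx\n", (unsigned long)ppgtt.pml4_dma);
	root_restore(&s);				/* clean_submission */
	printf("root after cleanup:       %#lx\n", (unsigned long)ppgtt.pml4_dma);
	return 0;
}

Running the sketch prints the shadow root while the "workload" is active and the original root after cleanup, which is the invariant the real helpers maintain around the shadow context.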