Diffstat (limited to 'drivers/gpu/drm/i915')
24 files changed, 385 insertions, 421 deletions
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 5fbd2ae95869..2b73f5ff0d02 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -120,7 +120,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	pipe_config->hw.adjusted_mode.flags |= flags;
 
 	if ((tmp & SDVO_COLOR_FORMAT_MASK) == HDMI_COLOR_FORMAT_12bpc)
-		dotclock = pipe_config->port_clock * 2 / 3;
+		dotclock = DIV_ROUND_CLOSEST(pipe_config->port_clock * 2, 3);
 	else
 		dotclock = pipe_config->port_clock;
 
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index dd008ba8afe3..461c62c88413 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -8130,6 +8130,17 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 	drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
 }
 
+static int max_dotclock(struct drm_i915_private *i915)
+{
+	int max_dotclock = i915->max_dotclk_freq;
+
+	/* icl+ might use bigjoiner */
+	if (DISPLAY_VER(i915) >= 11)
+		max_dotclock *= 2;
+
+	return max_dotclock;
+}
+
 static enum drm_mode_status
 intel_mode_valid(struct drm_device *dev,
 		 const struct drm_display_mode *mode)
@@ -8167,6 +8178,13 @@ intel_mode_valid(struct drm_device *dev,
 			   DRM_MODE_FLAG_CLKDIV2))
 		return MODE_BAD;
 
+	/*
+	 * Reject clearly excessive dotclocks early to
+	 * avoid having to worry about huge integers later.
+	 */
+	if (mode->clock > max_dotclock(dev_priv))
+		return MODE_CLOCK_HIGH;
+
 	/* Transcoder timing limits */
 	if (DISPLAY_VER(dev_priv) >= 11) {
 		hdisplay_max = 16384;
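Note: the g4x_hdmi.c hunk swaps a truncating integer division for round-to-nearest when recovering the dotclock from the port clock in the 12bpc case, and the intel_display.c hunks reject oversized dotclocks early using the (possibly bigjoiner-doubled) limit. A minimal userspace sketch of the rounding difference, using the same definition the kernel's DIV_ROUND_CLOSEST() has for positive operands; the port_clock value below is made up:

#include <stdio.h>

/* Same as the kernel's DIV_ROUND_CLOSEST() for positive values. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	int port_clock = 148351;	/* hypothetical kHz value */

	/* 12bpc: dotclock = port_clock * 2 / 3 */
	printf("truncated: %d\n", port_clock * 2 / 3);			/* 98900 */
	printf("closest:   %d\n", DIV_ROUND_CLOSEST(port_clock * 2, 3));	/* 98901 */
	return 0;
}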
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index c86e5d4ee016..1dddd6abd77b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -26,10 +26,17 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
 	struct drm_device *dev = fb->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+	struct i915_gem_ww_ctx ww;
 	struct i915_vma *vma;
 	u32 alignment;
 	int ret;
 
+	/*
+	 * We are not syncing against the binding (and potential migrations)
+	 * below, so this vm must never be async.
+	 */
+	GEM_WARN_ON(vm->bind_async_flags);
+
 	if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
 		return ERR_PTR(-EINVAL);
 
@@ -37,29 +44,48 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
 
 	atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
 
-	ret = i915_gem_object_lock_interruptible(obj, NULL);
-	if (!ret) {
+	for_i915_gem_ww(&ww, ret, true) {
+		ret = i915_gem_object_lock(obj, &ww);
+		if (ret)
+			continue;
+
+		if (HAS_LMEM(dev_priv)) {
+			unsigned int flags = obj->flags;
+
+			/*
+			 * For this type of buffer we need to be able to read from the CPU
+			 * the clear color value found in the buffer, hence we need to
+			 * ensure it is always in the mappable part of lmem, if this is
+			 * a small-bar device.
+			 */
+			if (intel_fb_rc_ccs_cc_plane(fb) >= 0)
+				flags &= ~I915_BO_ALLOC_GPU_ONLY;
+			ret = __i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM_0,
+							flags);
+			if (ret)
+				continue;
+		}
+
 		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
-		i915_gem_object_unlock(obj);
-	}
-	if (ret) {
-		vma = ERR_PTR(ret);
-		goto err;
-	}
+		if (ret)
+			continue;
 
-	vma = i915_vma_instance(obj, vm, view);
-	if (IS_ERR(vma))
-		goto err;
+		vma = i915_vma_instance(obj, vm, view);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			continue;
+		}
 
-	if (i915_vma_misplaced(vma, 0, alignment, 0)) {
-		ret = i915_vma_unbind_unlocked(vma);
-		if (ret) {
-			vma = ERR_PTR(ret);
-			goto err;
+		if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+			ret = i915_vma_unbind(vma);
+			if (ret)
+				continue;
 		}
-	}
 
-	ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+		ret = i915_vma_pin_ww(vma, &ww, 0, alignment, PIN_GLOBAL);
+		if (ret)
+			continue;
+	}
+
 	if (ret) {
 		vma = ERR_PTR(ret);
 		goto err;
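Note: intel_pin_fb_obj_dpt() moves from a single interruptible lock to a ww transaction; every failure path does 'continue' so that for_i915_gem_ww() can back off and retry on -EDEADLK, and the result is checked once after the loop. A standalone sketch of that control-flow shape under stated assumptions (simplified stand-ins, not the real i915 helpers):

#include <errno.h>
#include <stdio.h>

struct ww_ctx { int attempts; };

/* Pretend lock acquisition that deadlocks once and then succeeds. */
static int lock_all(struct ww_ctx *ww)
{
	return ww->attempts++ == 0 ? -EDEADLK : 0;
}

static int pin_with_retry(void)
{
	struct ww_ctx ww = { 0 };
	int ret;

	do {
		ret = lock_all(&ww);
		if (ret)
			continue;	/* -EDEADLK: drop locks, retry */
		/* migrate, set cache level, instantiate and pin the vma;
		 * every failing step would also 'continue' here */
	} while (ret == -EDEADLK);

	return ret;
}

int main(void)
{
	printf("%d\n", pin_with_retry());	/* 0 after one retry */
	return 0;
}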
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 9def8d9fade6..d4cce627d7a8 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -116,34 +116,56 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
 	}
 }
 
+static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+		EDP_PSR_ERROR(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+		EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+		EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
+}
+
+static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+		EDP_PSR_MASK(intel_dp->psr.transcoder);
+}
+
 static void psr_irq_control(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	enum transcoder trans_shift;
 	i915_reg_t imr_reg;
 	u32 mask, val;
 
-	/*
-	 * gen12+ has registers relative to transcoder and one per transcoder
-	 * using the same bit definition: handle it as TRANSCODER_EDP to force
-	 * 0 shift in bit definition
-	 */
-	if (DISPLAY_VER(dev_priv) >= 12) {
-		trans_shift = 0;
+	if (DISPLAY_VER(dev_priv) >= 12)
 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
-	} else {
-		trans_shift = intel_dp->psr.transcoder;
+	else
 		imr_reg = EDP_PSR_IMR;
-	}
 
-	mask = EDP_PSR_ERROR(trans_shift);
+	mask = psr_irq_psr_error_bit_get(intel_dp);
 	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
-		mask |= EDP_PSR_POST_EXIT(trans_shift) |
-			EDP_PSR_PRE_ENTRY(trans_shift);
+		mask |= psr_irq_post_exit_bit_get(intel_dp) |
+			psr_irq_pre_entry_bit_get(intel_dp);
 
-	/* Warning: it is masking/setting reserved bits too */
 	val = intel_de_read(dev_priv, imr_reg);
-	val &= ~EDP_PSR_TRANS_MASK(trans_shift);
+	val &= ~psr_irq_mask_get(intel_dp);
 	val |= ~mask;
 	intel_de_write(dev_priv, imr_reg, val);
 }
@@ -191,25 +213,21 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	ktime_t time_ns = ktime_get();
-	enum transcoder trans_shift;
 	i915_reg_t imr_reg;
 
-	if (DISPLAY_VER(dev_priv) >= 12) {
-		trans_shift = 0;
+	if (DISPLAY_VER(dev_priv) >= 12)
 		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
-	} else {
-		trans_shift = intel_dp->psr.transcoder;
+	else
 		imr_reg = EDP_PSR_IMR;
-	}
 
-	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
+	if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
 		intel_dp->psr.last_entry_attempt = time_ns;
 		drm_dbg_kms(&dev_priv->drm,
 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
 			    transcoder_name(cpu_transcoder));
 	}
 
-	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
+	if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
 		intel_dp->psr.last_exit = time_ns;
 		drm_dbg_kms(&dev_priv->drm,
 			    "[transcoder %s] PSR exit completed\n",
@@ -226,7 +244,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 		}
 	}
 
-	if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
+	if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
 		u32 val;
 
 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
@@ -243,7 +261,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 		 * or unset irq_aux_error.
 		 */
 		val = intel_de_read(dev_priv, imr_reg);
-		val |= EDP_PSR_ERROR(trans_shift);
+		val |= psr_irq_psr_error_bit_get(intel_dp);
 		intel_de_write(dev_priv, imr_reg, val);
 
 		schedule_work(&intel_dp->psr.work);
@@ -1194,14 +1212,12 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
 	 * first time that PSR HW tries to activate so let's keep PSR disabled
 	 * to avoid any rendering problems.
 	 */
-	if (DISPLAY_VER(dev_priv) >= 12) {
+	if (DISPLAY_VER(dev_priv) >= 12)
 		val = intel_de_read(dev_priv,
 				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
-		val &= EDP_PSR_ERROR(0);
-	} else {
+	else
 		val = intel_de_read(dev_priv, EDP_PSR_IIR);
-		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
-	}
+	val &= psr_irq_psr_error_bit_get(intel_dp);
 	if (val) {
 		intel_dp->psr.sink_not_reliable = true;
 		drm_dbg_kms(&dev_priv->drm,
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 01b0932757ed..18178b01375e 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -1710,10 +1710,22 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
 		      modifier == I915_FORMAT_MOD_4_TILED ||
 		      modifier == I915_FORMAT_MOD_Yf_TILED ||
 		      modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+		      modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+		      modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+		      modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+		      modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+		      modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
 	wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
 	wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
-			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+			 modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
+			 modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC ||
+			 modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS ||
+			 modifier == I915_FORMAT_MOD_4_TILED_DG2_MC_CCS ||
+			 modifier == I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC;
 	wp->is_planar = intel_format_info_is_yuv_semiplanar(format, modifier);
 	wp->width = width;
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 0bcde53c50c6..1e29b1e6d186 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1387,14 +1387,8 @@ kill_engines(struct i915_gem_engines *engines, bool exit, bool persistent)
 	 */
 	for_each_gem_engine(ce, engines, it) {
 		struct intel_engine_cs *engine;
-		bool skip = false;
 
-		if (exit)
-			skip = intel_context_set_exiting(ce);
-		else if (!persistent)
-			skip = intel_context_exit_nonpersistent(ce, NULL);
-
-		if (skip)
+		if ((exit || !persistent) && intel_context_revoke(ce))
 			continue; /* Already marked. */
 
 		/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cd75b0ca2555..845023c14eb3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2424,7 +2424,7 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
 	/* Check whether the file_priv has already selected one ring. */
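Note: the execbuffer hunk just below replaces the open-coded "get_random_int() % n" with prandom_u32_max(n), which maps a full-range 32-bit random value onto [0, n) with a multiply and a shift instead of a division. The same trick in plain C; the input value is a deterministic stand-in for the PRNG output:

#include <stdint.h>
#include <stdio.h>

/* Multiply-shift bounded mapping, as used by the kernel's prandom_u32_max(). */
static uint32_t bounded(uint32_t r, uint32_t bound)
{
	return (uint32_t)(((uint64_t)r * bound) >> 32);
}

int main(void)
{
	uint32_t r = 0x9e3779b9u;	/* pretend PRNG output */

	/* e.g. picking one of two video engines, as in gen8_dispatch_bsd_engine() */
	printf("engine %u\n", bounded(r, 2));	/* 1 */
	return 0;
}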
 	if ((int)file_priv->bsd_engine < 0)
 		file_priv->bsd_engine =
-			get_random_int() % num_vcs_engines(dev_priv);
+			prandom_u32_max(num_vcs_engines(dev_priv));
 
 	return file_priv->bsd_engine;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 7ff9c7877bec..369006c5317f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -653,6 +653,41 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
 			    struct i915_gem_ww_ctx *ww,
 			    enum intel_region_id id)
 {
+	return __i915_gem_object_migrate(obj, ww, id, obj->flags);
+}
+
+/**
+ * __i915_gem_object_migrate - Migrate an object to the desired region id, with
+ * control of the extra flags
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ * @flags: The object flags. Normally just obj->flags.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned, (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			      struct i915_gem_ww_ctx *ww,
+			      enum intel_region_id id,
+			      unsigned int flags)
+{
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct intel_memory_region *mr;
 
@@ -672,7 +707,7 @@ int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
 		return 0;
 	}
 
-	return obj->ops->migrate(obj, mr);
+	return obj->ops->migrate(obj, mr, flags);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 7317d4102955..1723af9b0f6a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -608,6 +608,10 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
 int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
 			    struct i915_gem_ww_ctx *ww,
 			    enum intel_region_id id);
+int __i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+			      struct i915_gem_ww_ctx *ww,
+			      enum intel_region_id id,
+			      unsigned int flags);
 
 bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
 				 enum intel_region_id id);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 40305e2bcd49..d0d6772e6f36 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -107,7 +107,8 @@ struct drm_i915_gem_object_ops {
 	 * pinning or for as long as the object lock is held.
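Note: the object code grows a double-underscore variant that takes the migration flags explicitly, while the old entry point keeps its behaviour by forwarding obj->flags, and the ->migrate() op is widened to match. A generic sketch of that wrapper-plus-vtable pattern; names and types here are illustrative, not the i915 ones:

#include <stdio.h>

struct object;

struct object_ops {
	int (*migrate)(struct object *obj, int region, unsigned int flags);
};

struct object {
	const struct object_ops *ops;
	unsigned int flags;
};

static int object_migrate_with_flags(struct object *obj, int region,
				     unsigned int flags)
{
	return obj->ops->migrate(obj, region, flags);	/* caller-chosen flags */
}

static int object_migrate(struct object *obj, int region)
{
	/* old behaviour preserved: default to the object's own flags */
	return object_migrate_with_flags(obj, region, obj->flags);
}

static int backend_migrate(struct object *obj, int region, unsigned int flags)
{
	(void)obj;
	printf("migrate to %d with flags %#x\n", region, flags);
	return 0;
}

int main(void)
{
	const struct object_ops ops = { .migrate = backend_migrate };
	struct object obj = { .ops = &ops, .flags = 0x3 };

	object_migrate(&obj, 1);				/* flags 0x3 */
	return object_migrate_with_flags(&obj, 1, 0x1);		/* override  */
}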
 	 */
 	int (*migrate)(struct drm_i915_gem_object *obj,
-		       struct intel_memory_region *mr);
+		       struct intel_memory_region *mr,
+		       unsigned int flags);
 
 	void (*release)(struct drm_i915_gem_object *obj);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e3fc38dd5db0..4f861782c3e8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -848,9 +848,10 @@ static int __i915_ttm_migrate(struct drm_i915_gem_object *obj,
 }
 
 static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
-			    struct intel_memory_region *mr)
+			    struct intel_memory_region *mr,
+			    unsigned int flags)
 {
-	return __i915_ttm_migrate(obj, mr, obj->flags);
+	return __i915_ttm_migrate(obj, mr, flags);
 }
 
 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8423df021b71..d4398948f016 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -426,12 +426,11 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 static int
 probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 {
-	const unsigned long end = addr + len;
+	VMA_ITERATOR(vmi, mm, addr);
 	struct vm_area_struct *vma;
-	int ret = -EFAULT;
 
 	mmap_read_lock(mm);
-	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+	for_each_vma_range(vmi, vma, addr + len) {
 		/* Check for holes, note that we also update the addr below */
 		if (vma->vm_start > addr)
 			break;
@@ -439,16 +438,13 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
 		if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
 			break;
 
-		if (vma->vm_end >= end) {
-			ret = 0;
-			break;
-		}
-
 		addr = vma->vm_end;
 	}
 	mmap_read_unlock(mm);
 
-	return ret;
+	if (vma)
+		return -EFAULT;
+	return 0;
 }
 
 /*
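Note: probe_range() now walks only the VMAs intersecting the probed range via the maple-tree iterator and reports -EFAULT when the walk stops early on a gap or an unsuitable mapping. A standalone model of that walk, with plain sorted arrays standing in for the VMA iterator (a sketch, not the kernel types):

#include <stddef.h>
#include <stdio.h>

struct range { unsigned long start, end; };

static int probe_range(const struct range *map, size_t n,
		       unsigned long addr, unsigned long len)
{
	const unsigned long end = addr + len;
	const struct range *vma = NULL;
	size_t i;

	/* visit, in address order, only mappings that intersect [addr, end) */
	for (i = 0; i < n && map[i].start < end; i++) {
		vma = &map[i];
		if (vma->start > addr)
			break;		/* hole in front of this mapping */
		addr = vma->end;	/* ok so far, advance */
		vma = NULL;		/* mimics the iterator running out */
	}

	return vma ? -1 : 0;		/* stopped on a mapping => hole */
}

int main(void)
{
	const struct range map[] = { { 0x1000, 0x3000 }, { 0x4000, 0x6000 } };

	printf("%d\n", probe_range(map, 2, 0x1000, 0x2000));	/*  0 */
	printf("%d\n", probe_range(map, 2, 0x2000, 0x3000));	/* -1 */
	return 0;
}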
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 654a092ed3d6..e94365b08f1e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -614,13 +614,12 @@ bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
 	return ret;
 }
 
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
-				      struct i915_request *rq)
+bool intel_context_revoke(struct intel_context *ce)
 {
 	bool ret = intel_context_set_exiting(ce);
 
 	if (ce->ops->revoke)
-		ce->ops->revoke(ce, rq, ce->engine->props.preempt_timeout_ms);
+		ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
 
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 8e2d70630c49..be09fb2e883a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -329,8 +329,7 @@ static inline bool intel_context_set_exiting(struct intel_context *ce)
 	return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
 }
 
-bool intel_context_exit_nonpersistent(struct intel_context *ce,
-				      struct i915_request *rq);
+bool intel_context_revoke(struct intel_context *ce);
 
 static inline bool
 intel_context_force_single_submission(const struct intel_context *ce)
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 30cf5c3369d9..2049a00417af 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -1275,10 +1275,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
 			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
 
 		GEM_BUG_ON(!was_bound);
-		if (!retained_ptes)
+		if (!retained_ptes) {
+			/*
+			 * Clear the bound flags of the vma resource to allow
+			 * ptes to be repopulated.
+			 */
+			vma->resource->bound_flags = 0;
 			vma->ops->bind_vma(vm, NULL, vma->resource,
 					   obj ? obj->cache_level : 0,
 					   was_bound);
+		}
 		if (obj) { /* only used during resume => exclusive access */
 			write_domain_objs |= fetch_and_zero(&obj->write_domain);
 			obj->read_domains |= I915_GEM_DOMAIN_GTT;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index c6ebe2781076..152244d7f62a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -207,6 +207,14 @@ static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
 	MOCS_ENTRY(15, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
 		   L3_3_WB), \
+	/* Bypass LLC - Uncached (EHL+) */ \
+	MOCS_ENTRY(16, \
+		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+		   L3_1_UC), \
+	/* Bypass LLC - L3 (Read-Only) (EHL+) */ \
+	MOCS_ENTRY(17, \
+		   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
+		   L3_3_WB), \
 	/* Self-Snoop - L3 + LLC */ \
 	MOCS_ENTRY(18, \
 		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 22ba66e48a9b..1db59eeb34db 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -684,7 +684,7 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 	 * Corner case where requests were sitting in the priority list or a
 	 * request resubmitted after the context was banned.
 	 */
-	if (unlikely(intel_context_is_banned(ce))) {
+	if (unlikely(!intel_context_is_schedulable(ce))) {
 		i915_request_put(i915_request_mark_eio(rq));
 		intel_engine_signal_breadcrumbs(ce->engine);
 		return 0;
@@ -870,15 +870,15 @@ static int guc_wq_item_append(struct intel_guc *guc,
 			      struct i915_request *rq)
 {
 	struct intel_context *ce = request_to_scheduling_context(rq);
-	int ret = 0;
+	int ret;
 
-	if (likely(!intel_context_is_banned(ce))) {
-		ret = __guc_wq_item_append(rq);
+	if (unlikely(!intel_context_is_schedulable(ce)))
+		return 0;
 
-		if (unlikely(ret == -EBUSY)) {
-			guc->stalled_request = rq;
-			guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
-		}
+	ret = __guc_wq_item_append(rq);
+	if (unlikely(ret == -EBUSY)) {
+		guc->stalled_request = rq;
+		guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
 	}
 
 	return ret;
@@ -897,7 +897,7 @@ static bool multi_lrc_submit(struct i915_request *rq)
 	 * submitting all the requests generated in parallel.
 	 */
 	return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
-	       intel_context_is_banned(ce);
+	       !intel_context_is_schedulable(ce);
 }
 
 static int guc_dequeue_one_context(struct intel_guc *guc)
@@ -966,7 +966,7 @@ register_context:
 		struct intel_context *ce = request_to_scheduling_context(last);
 
 		if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
-			     !intel_context_is_banned(ce))) {
+			     intel_context_is_schedulable(ce))) {
 			ret = try_context_registration(ce, false);
 			if (unlikely(ret == -EPIPE)) {
 				goto deadlk;
@@ -1576,7 +1576,7 @@ static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
 {
 	struct intel_engine_cs *engine = __context_to_physical_engine(ce);
 
-	if (intel_context_is_banned(ce))
+	if (!intel_context_is_schedulable(ce))
 		return;
 
 	GEM_BUG_ON(!intel_context_is_pinned(ce));
@@ -4424,12 +4424,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
 {
 	trace_intel_context_reset(ce);
 
-	if (likely(!intel_context_is_banned(ce))) {
+	if (likely(intel_context_is_schedulable(ce))) {
 		capture_error_state(guc, ce);
 		guc_context_replay(ce);
 	} else {
 		drm_info(&guc_to_gt(guc)->i915->drm,
-			 "Ignoring context reset notification of banned context 0x%04X on %s",
+			 "Ignoring context reset notification of exiting context 0x%04X on %s",
 			 ce->guc_id.id, ce->engine->name);
 	}
 }
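Note: the GuC submission paths switch from intel_context_is_banned() to the broader intel_context_is_schedulable(), so contexts that are exiting gracefully (such as revoked non-persistent contexts) are treated like banned ones at submission time. A sketch of that predicate with plain bit flags; the flag values are illustrative, not the i915 definitions:

#include <stdbool.h>
#include <stdio.h>

#define CONTEXT_BANNED	(1u << 0)
#define CONTEXT_EXITING	(1u << 1)

struct ctx { unsigned int flags; };

static bool is_schedulable(const struct ctx *ce)
{
	/* runnable only if neither banned nor marked exiting */
	return !(ce->flags & (CONTEXT_BANNED | CONTEXT_EXITING));
}

int main(void)
{
	struct ctx banned = { CONTEXT_BANNED };
	struct ctx exiting = { CONTEXT_EXITING };

	/* the old "!banned" check would wrongly treat 'exiting' as runnable */
	printf("%d %d\n", is_schedulable(&banned), is_schedulable(&exiting));
	return 0;
}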
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 3b81a6d35a7b..076c779f776a 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -240,13 +240,13 @@ static void free_resource(struct intel_vgpu *vgpu)
 }
 
 static int alloc_resource(struct intel_vgpu *vgpu,
-		struct intel_vgpu_creation_params *param)
+			  const struct intel_vgpu_config *conf)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	unsigned long request, avail, max, taken;
 	const char *item;
 
-	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
+	if (!conf->low_mm || !conf->high_mm || !conf->fence) {
 		gvt_vgpu_err("Invalid vGPU creation params\n");
 		return -EINVAL;
 	}
@@ -255,7 +255,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	max = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
 	taken = gvt->gm.vgpu_allocated_low_gm_size;
 	avail = max - taken;
-	request = MB_TO_BYTES(param->low_gm_sz);
+	request = conf->low_mm;
 
 	if (request > avail)
 		goto no_enough_resource;
@@ -266,7 +266,7 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	max = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
 	taken = gvt->gm.vgpu_allocated_high_gm_size;
 	avail = max - taken;
-	request = MB_TO_BYTES(param->high_gm_sz);
+	request = conf->high_mm;
 
 	if (request > avail)
 		goto no_enough_resource;
@@ -277,16 +277,16 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	max = gvt_fence_sz(gvt) - HOST_FENCE;
 	taken = gvt->fence.vgpu_allocated_fence_num;
 	avail = max - taken;
-	request = param->fence_sz;
+	request = conf->fence;
 
 	if (request > avail)
 		goto no_enough_resource;
 
 	vgpu_fence_sz(vgpu) = request;
 
-	gvt->gm.vgpu_allocated_low_gm_size += MB_TO_BYTES(param->low_gm_sz);
-	gvt->gm.vgpu_allocated_high_gm_size += MB_TO_BYTES(param->high_gm_sz);
-	gvt->fence.vgpu_allocated_fence_num += param->fence_sz;
+	gvt->gm.vgpu_allocated_low_gm_size += conf->low_mm;
+	gvt->gm.vgpu_allocated_high_gm_size += conf->high_mm;
+	gvt->fence.vgpu_allocated_fence_num += conf->fence;
 	return 0;
 
 no_enough_resource:
@@ -340,11 +340,11 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
  *
  */
 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
-		struct intel_vgpu_creation_params *param)
+			      const struct intel_vgpu_config *conf)
 {
 	int ret;
 
-	ret = alloc_resource(vgpu, param);
+	ret = alloc_resource(vgpu, conf);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 705689e64011..dbf8d7470b2c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -36,6 +36,7 @@
 #include <uapi/linux/pci_regs.h>
 #include <linux/kvm_host.h>
 #include <linux/vfio.h>
+#include <linux/mdev.h>
 
 #include "i915_drv.h"
 #include "intel_gvt.h"
@@ -172,6 +173,7 @@ struct intel_vgpu_submission {
 #define KVMGT_DEBUGFS_FILENAME "kvmgt_nr_cache_entries"
 
 struct intel_vgpu {
+	struct vfio_device vfio_device;
 	struct intel_gvt *gvt;
 	struct mutex vgpu_lock;
 	int id;
@@ -211,7 +213,6 @@ struct intel_vgpu {
 
 	u32 scan_nonprivbb;
 
-	struct vfio_device vfio_device;
 	struct vfio_region *region;
 	int num_regions;
 	struct eventfd_ctx *intx_trigger;
@@ -294,15 +295,25 @@ struct intel_gvt_firmware {
 	bool firmware_loaded;
 };
 
-#define NR_MAX_INTEL_VGPU_TYPES 20
-struct intel_vgpu_type {
-	char name[16];
-	unsigned int avail_instance;
-	unsigned int low_gm_size;
-	unsigned int high_gm_size;
+struct intel_vgpu_config {
+	unsigned int low_mm;
+	unsigned int high_mm;
 	unsigned int fence;
+
+	/*
+	 * A vGPU with a weight of 8 will get twice as much GPU as a vGPU with
+	 * a weight of 4 on a contended host, different vGPU type has different
+	 * weight set. Legal weights range from 1 to 16.
+	 */
 	unsigned int weight;
-	enum intel_vgpu_edid resolution;
+	enum intel_vgpu_edid edid;
+	const char *name;
+};
+
+struct intel_vgpu_type {
+	struct mdev_type type;
+	char name[16];
+	const struct intel_vgpu_config *conf;
 };
 
 struct intel_gvt {
@@ -326,6 +337,8 @@ struct intel_gvt {
 	struct intel_gvt_workload_scheduler scheduler;
 	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
 	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
+	struct mdev_parent parent;
+	struct mdev_type **mdev_types;
 	struct intel_vgpu_type *types;
 	unsigned int num_types;
 	struct intel_vgpu *idle_vgpu;
@@ -436,19 +449,8 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 /* ring context size i.e. the first 0x50 dwords*/
 #define RING_CTX_SIZE 320
 
-struct intel_vgpu_creation_params {
-	__u64 low_gm_sz;  /* in MB */
-	__u64 high_gm_sz; /* in MB */
-	__u64 fence_sz;
-	__u64 resolution;
-	__s32 primary;
-	__u64 vgpu_id;
-
-	__u32 weight;
-};
-
 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
-		struct intel_vgpu_creation_params *param);
+			      const struct intel_vgpu_config *conf);
 void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
@@ -494,8 +496,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
 
 struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
 void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
-					 struct intel_vgpu_type *type);
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+			  const struct intel_vgpu_config *conf);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
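Note: gvt.h now embeds a struct mdev_type inside intel_vgpu_type and points it at a shared intel_vgpu_config, so the mdev callbacks in kvmgt.c below can recover the driver type with container_of() instead of the old type-group-id lookup. A minimal standalone illustration of that embedding (toy types, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mdev_type { const char *sysfs_name; };

struct intel_vgpu_type {
	struct mdev_type type;	/* embedded core object */
	char name[16];
};

int main(void)
{
	struct intel_vgpu_type t = { .type = { "GVTg_V5_4" }, .name = "GVTg_V5_4" };
	struct mdev_type *mtype = &t.type;	/* what the core hands back */

	struct intel_vgpu_type *back =
		container_of(mtype, struct intel_vgpu_type, type);
	printf("%s\n", back->name);
	return 0;
}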
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index e3cd58946477..7a45e5360caf 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -34,7 +34,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/device.h>
 #include <linux/mm.h>
 #include <linux/kthread.h>
 #include <linux/sched/mm.h>
@@ -43,7 +42,6 @@
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
 #include <linux/eventfd.h>
-#include <linux/uuid.h>
 #include <linux/mdev.h>
 #include <linux/debugfs.h>
 
@@ -115,117 +113,18 @@ static void kvmgt_page_track_flush_slot(struct kvm *kvm,
 		struct kvm_memory_slot *slot,
 		struct kvm_page_track_notifier_node *node);
 
-static ssize_t available_instances_show(struct mdev_type *mtype,
-					struct mdev_type_attribute *attr,
-					char *buf)
+static ssize_t intel_vgpu_show_description(struct mdev_type *mtype, char *buf)
 {
-	struct intel_vgpu_type *type;
-	unsigned int num = 0;
-	struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
-	type = &gvt->types[mtype_get_type_group_id(mtype)];
-	if (!type)
-		num = 0;
-	else
-		num = type->avail_instance;
-
-	return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
-			       struct mdev_type_attribute *attr, char *buf)
-{
-	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
-				struct mdev_type_attribute *attr, char *buf)
-{
-	struct intel_vgpu_type *type;
-	struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
-	type = &gvt->types[mtype_get_type_group_id(mtype)];
-	if (!type)
-		return 0;
+	struct intel_vgpu_type *type =
+		container_of(mtype, struct intel_vgpu_type, type);
 
 	return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
 		       "fence: %d\nresolution: %s\n"
 		       "weight: %d\n",
-		       BYTES_TO_MB(type->low_gm_size),
-		       BYTES_TO_MB(type->high_gm_size),
-		       type->fence, vgpu_edid_str(type->resolution),
-		       type->weight);
-}
-
-static ssize_t name_show(struct mdev_type *mtype,
-			 struct mdev_type_attribute *attr, char *buf)
-{
-	struct intel_vgpu_type *type;
-	struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
-	type = &gvt->types[mtype_get_type_group_id(mtype)];
-	if (!type)
-		return 0;
-
-	return sprintf(buf, "%s\n", type->name);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-static MDEV_TYPE_ATTR_RO(name);
-
-static struct attribute *gvt_type_attrs[] = {
-	&mdev_type_attr_available_instances.attr,
-	&mdev_type_attr_device_api.attr,
-	&mdev_type_attr_description.attr,
-	&mdev_type_attr_name.attr,
-	NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
-	[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
-	int i, j;
-	struct intel_vgpu_type *type;
-	struct attribute_group *group;
-
-	for (i = 0; i < gvt->num_types; i++) {
-		type = &gvt->types[i];
-
-		group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
-		if (!group)
-			goto unwind;
-
-		group->name = type->name;
-		group->attrs = gvt_type_attrs;
-		gvt_vgpu_type_groups[i] = group;
-	}
-
-	return 0;
-
-unwind:
-	for (j = 0; j < i; j++) {
-		group = gvt_vgpu_type_groups[j];
-		kfree(group);
-	}
-
-	return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
-	int i;
-	struct attribute_group *group;
-
-	for (i = 0; i < gvt->num_types; i++) {
-		group = gvt_vgpu_type_groups[i];
-		gvt_vgpu_type_groups[i] = NULL;
-		kfree(group);
-	}
+		       BYTES_TO_MB(type->conf->low_mm),
+		       BYTES_TO_MB(type->conf->high_mm),
+		       type->conf->fence, vgpu_edid_str(type->conf->edid),
+		       type->conf->weight);
 }
 
 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
@@ -1546,7 +1445,28 @@ static const struct attribute_group *intel_vgpu_groups[] = {
 	NULL,
 };
 
+static int intel_vgpu_init_dev(struct vfio_device *vfio_dev)
+{
+	struct mdev_device *mdev = to_mdev_device(vfio_dev->dev);
+	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+	struct intel_vgpu_type *type =
+		container_of(mdev->type, struct intel_vgpu_type, type);
+
+	vgpu->gvt = kdev_to_i915(mdev->type->parent->dev)->gvt;
+	return intel_gvt_create_vgpu(vgpu, type->conf);
+}
+
+static void intel_vgpu_release_dev(struct vfio_device *vfio_dev)
+{
+	struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
+
+	intel_gvt_destroy_vgpu(vgpu);
+	vfio_free_device(vfio_dev);
+}
+
 static const struct vfio_device_ops intel_vgpu_dev_ops = {
+	.init		= intel_vgpu_init_dev,
+	.release	= intel_vgpu_release_dev,
 	.open_device	= intel_vgpu_open_device,
 	.close_device	= intel_vgpu_close_device,
 	.read		= intel_vgpu_read,
@@ -1558,35 +1478,28 @@ static const struct vfio_device_ops intel_vgpu_dev_ops = {
 
 static int intel_vgpu_probe(struct mdev_device *mdev)
 {
-	struct device *pdev = mdev_parent_dev(mdev);
-	struct intel_gvt *gvt = kdev_to_i915(pdev)->gvt;
-	struct intel_vgpu_type *type;
 	struct intel_vgpu *vgpu;
 	int ret;
 
-	type = &gvt->types[mdev_get_type_group_id(mdev)];
-	if (!type)
-		return -EINVAL;
-
-	vgpu = intel_gvt_create_vgpu(gvt, type);
+	vgpu = vfio_alloc_device(intel_vgpu, vfio_device, &mdev->dev,
+				 &intel_vgpu_dev_ops);
 	if (IS_ERR(vgpu)) {
 		gvt_err("failed to create intel vgpu: %ld\n", PTR_ERR(vgpu));
 		return PTR_ERR(vgpu);
 	}
 
-	vfio_init_group_dev(&vgpu->vfio_device, &mdev->dev,
-			    &intel_vgpu_dev_ops);
-
 	dev_set_drvdata(&mdev->dev, vgpu);
 	ret = vfio_register_emulated_iommu_dev(&vgpu->vfio_device);
-	if (ret) {
-		intel_gvt_destroy_vgpu(vgpu);
-		return ret;
-	}
+	if (ret)
+		goto out_put_vdev;
 
 	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
 		     dev_name(mdev_dev(mdev)));
 	return 0;
+
+out_put_vdev:
+	vfio_put_device(&vgpu->vfio_device);
+	return ret;
 }
 
 static void intel_vgpu_remove(struct mdev_device *mdev)
@@ -1595,18 +1508,43 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
 	if (WARN_ON_ONCE(vgpu->attached))
 		return;
-
-	intel_gvt_destroy_vgpu(vgpu);
+
+	vfio_unregister_group_dev(&vgpu->vfio_device);
+	vfio_put_device(&vgpu->vfio_device);
+}
+
+static unsigned int intel_vgpu_get_available(struct mdev_type *mtype)
+{
+	struct intel_vgpu_type *type =
+		container_of(mtype, struct intel_vgpu_type, type);
+	struct intel_gvt *gvt = kdev_to_i915(mtype->parent->dev)->gvt;
+	unsigned int low_gm_avail, high_gm_avail, fence_avail;
+
+	mutex_lock(&gvt->lock);
+	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
+		gvt->gm.vgpu_allocated_low_gm_size;
+	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
+		gvt->gm.vgpu_allocated_high_gm_size;
+	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
+		gvt->fence.vgpu_allocated_fence_num;
+	mutex_unlock(&gvt->lock);
+
+	return min3(low_gm_avail / type->conf->low_mm,
+		    high_gm_avail / type->conf->high_mm,
+		    fence_avail / type->conf->fence);
 }
 
 static struct mdev_driver intel_vgpu_mdev_driver = {
+	.device_api	= VFIO_DEVICE_API_PCI_STRING,
 	.driver = {
 		.name		= "intel_vgpu_mdev",
 		.owner		= THIS_MODULE,
 		.dev_groups	= intel_vgpu_groups,
 	},
-	.probe		= intel_vgpu_probe,
-	.remove		= intel_vgpu_remove,
-	.supported_type_groups	= gvt_vgpu_type_groups,
+	.probe			= intel_vgpu_probe,
+	.remove			= intel_vgpu_remove,
+	.get_available		= intel_vgpu_get_available,
+	.show_description	= intel_vgpu_show_description,
 };
 
 int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
@@ -1904,8 +1842,7 @@ static void intel_gvt_clean_device(struct drm_i915_private *i915)
 	if (drm_WARN_ON(&i915->drm, !gvt))
 		return;
 
-	mdev_unregister_device(i915->drm.dev);
-	intel_gvt_cleanup_vgpu_type_groups(gvt);
+	mdev_unregister_parent(&gvt->parent);
 	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
 	intel_gvt_clean_vgpu_types(gvt);
 
@@ -2005,19 +1942,15 @@ static int intel_gvt_init_device(struct drm_i915_private *i915)
 
 	intel_gvt_debugfs_init(gvt);
 
-	ret = intel_gvt_init_vgpu_type_groups(gvt);
+	ret = mdev_register_parent(&gvt->parent, i915->drm.dev,
+				   &intel_vgpu_mdev_driver,
+				   gvt->mdev_types, gvt->num_types);
 	if (ret)
 		goto out_destroy_idle_vgpu;
 
-	ret = mdev_register_device(i915->drm.dev, &intel_vgpu_mdev_driver);
-	if (ret)
-		goto out_cleanup_vgpu_type_groups;
-
 	gvt_dbg_core("gvt device initialization is done\n");
 	return 0;
 
-out_cleanup_vgpu_type_groups:
-	intel_gvt_cleanup_vgpu_type_groups(gvt);
 out_destroy_idle_vgpu:
 	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
 	intel_gvt_debugfs_clean(gvt);
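Note: available_instances is no longer cached in the type struct; intel_vgpu_get_available() recomputes it on demand as the most constrained of the three remaining resources. The same arithmetic with toy numbers (the availability figures are invented for illustration):

#include <stdio.h>

static unsigned long min3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* pretend this much is left after the host and existing vGPUs */
	unsigned long low_gm_avail = 384ul << 20;
	unsigned long high_gm_avail = 1536ul << 20;
	unsigned long fence_avail = 14;

	/* a "GVTg_V5_4"-style config: 128MB low, 512MB high, 4 fences */
	unsigned long low_mm = 128ul << 20, high_mm = 512ul << 20, fence = 4;

	printf("available_instances: %lu\n",
	       min3(low_gm_avail / low_mm, high_gm_avail / high_mm,
		    fence_avail / fence));	/* min(3, 3, 3) = 3 */
	return 0;
}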
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 46da19b3225d..56c71474008a 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -73,24 +73,21 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
 }
 
+/*
+ * vGPU type name is defined as GVTg_Vx_y which contains the physical GPU
+ * generation type (e.g V4 as BDW server, V5 as SKL server).
+ *
+ * Depending on the physical SKU resource, we might see vGPU types like
+ * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create different types of
+ * vGPU on same physical GPU depending on available resource. Each vGPU
+ * type will have a different number of avail_instance to indicate how
+ * many vGPU instance can be created for this type.
+ */
 #define VGPU_MAX_WEIGHT 16
 #define VGPU_WEIGHT(vgpu_num)	\
 	(VGPU_MAX_WEIGHT / (vgpu_num))
 
-static const struct {
-	unsigned int low_mm;
-	unsigned int high_mm;
-	unsigned int fence;
-
-	/* A vGPU with a weight of 8 will get twice as much GPU as a vGPU
-	 * with a weight of 4 on a contended host, different vGPU type has
-	 * different weight set. Legal weights range from 1 to 16.
-	 */
-	unsigned int weight;
-	enum intel_vgpu_edid edid;
-	const char *name;
-} vgpu_types[] = {
-/* Fixed vGPU type table */
+static const struct intel_vgpu_config intel_vgpu_configs[] = {
 	{ MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
 	{ MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
 	{ MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
@@ -106,102 +103,58 @@ static const struct {
  */
 int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
 {
-	unsigned int num_types;
-	unsigned int i, low_avail, high_avail;
-	unsigned int min_low;
-
-	/* vGPU type name is defined as GVTg_Vx_y which contains
-	 * physical GPU generation type (e.g V4 as BDW server, V5 as
-	 * SKL server).
-	 *
-	 * Depend on physical SKU resource, might see vGPU types like
-	 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
-	 * different types of vGPU on same physical GPU depending on
-	 * available resource. Each vGPU type will have "avail_instance"
-	 * to indicate how many vGPU instance can be created for this
-	 * type.
-	 *
-	 */
-	low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
-	high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
-	num_types = ARRAY_SIZE(vgpu_types);
+	unsigned int low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
+	unsigned int high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
+	unsigned int num_types = ARRAY_SIZE(intel_vgpu_configs);
+	unsigned int i;
 
 	gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
 			     GFP_KERNEL);
 	if (!gvt->types)
 		return -ENOMEM;
 
-	min_low = MB_TO_BYTES(32);
-	for (i = 0; i < num_types; ++i) {
-		if (low_avail / vgpu_types[i].low_mm == 0)
-			break;
-
-		gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
-		gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
-		gvt->types[i].fence = vgpu_types[i].fence;
+	gvt->mdev_types = kcalloc(num_types, sizeof(*gvt->mdev_types),
+				  GFP_KERNEL);
+	if (!gvt->mdev_types)
+		goto out_free_types;
 
-		if (vgpu_types[i].weight < 1 ||
-		    vgpu_types[i].weight > VGPU_MAX_WEIGHT)
-			return -EINVAL;
+	for (i = 0; i < num_types; ++i) {
+		const struct intel_vgpu_config *conf = &intel_vgpu_configs[i];
 
-		gvt->types[i].weight = vgpu_types[i].weight;
-		gvt->types[i].resolution = vgpu_types[i].edid;
-		gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
-						   high_avail / vgpu_types[i].high_mm);
+		if (low_avail / conf->low_mm == 0)
+			break;
+		if (conf->weight < 1 || conf->weight > VGPU_MAX_WEIGHT)
+			goto out_free_mdev_types;
 
-		if (GRAPHICS_VER(gvt->gt->i915) == 8)
-			sprintf(gvt->types[i].name, "GVTg_V4_%s",
-				vgpu_types[i].name);
-		else if (GRAPHICS_VER(gvt->gt->i915) == 9)
-			sprintf(gvt->types[i].name, "GVTg_V5_%s",
-				vgpu_types[i].name);
+		sprintf(gvt->types[i].name, "GVTg_V%u_%s",
+			GRAPHICS_VER(gvt->gt->i915) == 8 ? 4 : 5, conf->name);
+		gvt->types[i].conf = conf;
 
 		gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
 			     i, gvt->types[i].name,
-			     gvt->types[i].avail_instance,
-			     gvt->types[i].low_gm_size,
-			     gvt->types[i].high_gm_size, gvt->types[i].fence,
-			     gvt->types[i].weight,
-			     vgpu_edid_str(gvt->types[i].resolution));
+			     min(low_avail / conf->low_mm,
+				 high_avail / conf->high_mm),
+			     conf->low_mm, conf->high_mm, conf->fence,
+			     conf->weight, vgpu_edid_str(conf->edid));
+
+		gvt->mdev_types[i] = &gvt->types[i].type;
+		gvt->mdev_types[i]->sysfs_name = gvt->types[i].name;
 	}
 
 	gvt->num_types = i;
 	return 0;
-}
 
-void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
-{
+out_free_mdev_types:
+	kfree(gvt->mdev_types);
+out_free_types:
 	kfree(gvt->types);
+	return -EINVAL;
 }
 
-static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
+void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
 {
-	int i;
-	unsigned int low_gm_avail, high_gm_avail, fence_avail;
-	unsigned int low_gm_min, high_gm_min, fence_min;
-
-	/* Need to depend on maxium hw resource size but keep on
-	 * static config for now.
-	 */
-	low_gm_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE -
-		gvt->gm.vgpu_allocated_low_gm_size;
-	high_gm_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE -
-		gvt->gm.vgpu_allocated_high_gm_size;
-	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
-		gvt->fence.vgpu_allocated_fence_num;
-
-	for (i = 0; i < gvt->num_types; i++) {
-		low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
-		high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
-		fence_min = fence_avail / gvt->types[i].fence;
-		gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
-						   fence_min);
-
-		gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
-			     i, gvt->types[i].name,
-			     gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
-			     gvt->types[i].high_gm_size, gvt->types[i].fence);
-	}
+	kfree(gvt->mdev_types);
+	kfree(gvt->types);
 }
 
 /**
@@ -298,12 +251,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	intel_vgpu_clean_mmio(vgpu);
 	intel_vgpu_dmabuf_cleanup(vgpu);
 	mutex_unlock(&vgpu->vgpu_lock);
-
-	mutex_lock(&gvt->lock);
-	intel_gvt_update_vgpu_types(gvt);
-	mutex_unlock(&gvt->lock);
-
-	vfree(vgpu);
 }
 
 #define IDLE_VGPU_IDR 0
@@ -363,42 +310,38 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
 	vfree(vgpu);
 }
 
-static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
-		struct intel_vgpu_creation_params *param)
+int intel_gvt_create_vgpu(struct intel_vgpu *vgpu,
+			  const struct intel_vgpu_config *conf)
 {
+	struct intel_gvt *gvt = vgpu->gvt;
 	struct drm_i915_private *dev_priv = gvt->gt->i915;
-	struct intel_vgpu *vgpu;
 	int ret;
 
-	gvt_dbg_core("low %llu MB high %llu MB fence %llu\n",
-			param->low_gm_sz, param->high_gm_sz,
-			param->fence_sz);
-
-	vgpu = vzalloc(sizeof(*vgpu));
-	if (!vgpu)
-		return ERR_PTR(-ENOMEM);
+	gvt_dbg_core("low %u MB high %u MB fence %u\n",
+		     BYTES_TO_MB(conf->low_mm), BYTES_TO_MB(conf->high_mm),
		     conf->fence);
 
+	mutex_lock(&gvt->lock);
 	ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
 		GFP_KERNEL);
 	if (ret < 0)
-		goto out_free_vgpu;
+		goto out_unlock;
 
 	vgpu->id = ret;
-	vgpu->gvt = gvt;
-	vgpu->sched_ctl.weight = param->weight;
+	vgpu->sched_ctl.weight = conf->weight;
 	mutex_init(&vgpu->vgpu_lock);
 	mutex_init(&vgpu->dmabuf_lock);
 	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
 	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init_base(&vgpu->object_idr, 1);
-	intel_vgpu_init_cfg_space(vgpu, param->primary);
+	intel_vgpu_init_cfg_space(vgpu, 1);
 	vgpu->d3_entered = false;
 
 	ret = intel_vgpu_init_mmio(vgpu);
 	if (ret)
 		goto out_clean_idr;
 
-	ret = intel_vgpu_alloc_resource(vgpu, param);
+	ret = intel_vgpu_alloc_resource(vgpu, conf);
 	if (ret)
 		goto out_clean_vgpu_mmio;
 
@@ -412,7 +355,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_gtt;
 
-	ret = intel_vgpu_init_display(vgpu, param->resolution);
+	ret = intel_vgpu_init_display(vgpu, conf->edid);
 	if (ret)
 		goto out_clean_opregion;
 
@@ -437,7 +380,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	if (ret)
 		goto out_clean_sched_policy;
 
-	return vgpu;
+	intel_gvt_update_reg_whitelist(vgpu);
+	mutex_unlock(&gvt->lock);
+	return 0;
 
 out_clean_sched_policy:
 	intel_vgpu_clean_sched_policy(vgpu);
@@ -455,48 +400,9 @@ out_clean_vgpu_mmio:
 	intel_vgpu_clean_mmio(vgpu);
 out_clean_idr:
 	idr_remove(&gvt->vgpu_idr, vgpu->id);
-out_free_vgpu:
-	vfree(vgpu);
-	return ERR_PTR(ret);
-}
-
-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @type: type of the vGPU to create
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
-					 struct intel_vgpu_type *type)
-{
-	struct intel_vgpu_creation_params param;
-	struct intel_vgpu *vgpu;
-
-	param.primary = 1;
-	param.low_gm_sz = type->low_gm_size;
-	param.high_gm_sz = type->high_gm_size;
-	param.fence_sz = type->fence;
-	param.weight = type->weight;
-	param.resolution = type->resolution;
-
-	/* XXX current param based on MB */
-	param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
-	param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
-
-	mutex_lock(&gvt->lock);
-	vgpu = __intel_gvt_create_vgpu(gvt, &param);
-	if (!IS_ERR(vgpu)) {
-		/* calculate left instance change for types */
-		intel_gvt_update_vgpu_types(gvt);
-		intel_gvt_update_reg_whitelist(vgpu);
-	}
+out_unlock:
 	mutex_unlock(&gvt->lock);
-
-	return vgpu;
+	return ret;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 329ff75b80b9..7bd1861ddbdf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -137,12 +137,12 @@ static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
 	range = round_down(end - len, align) - round_up(start, align);
 	if (range) {
 		if (sizeof(unsigned long) == sizeof(u64)) {
-			addr = get_random_long();
+			addr = get_random_u64();
 		} else {
-			addr = get_random_int();
+			addr = get_random_u32();
 			if (range > U32_MAX) {
 				addr <<= 32;
-				addr |= get_random_int();
+				addr |= get_random_u32();
 			}
 		}
 		div64_u64_rem(addr, range, &addr);
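Note: the i915_reg.h change below replaces the raw 0x7/0x4/0x2/0x1 constants with TGL_* definitions built from REG_BIT()/REG_GENMASK() and re-expresses the legacy per-transcoder macros in terms of them. Roughly what those expand to, as a plain-C sketch without the kernel's compile-time checks (the enum values are simplified):

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	((~0u >> (31 - (h))) & (~0u << (l)))

#define TGL_PSR_MASK	GENMASK(2, 0)	/* 0x7 */
#define TGL_PSR_ERROR	BIT(2)		/* 0x4 */

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

#define _EDP_PSR_TRANS_SHIFT(t) \
	((t) == TRANSCODER_EDP ? 0 : ((t) - TRANSCODER_A + 1) * 8)
#define EDP_PSR_ERROR(t)	(TGL_PSR_ERROR << _EDP_PSR_TRANS_SHIFT(t))

int main(void)
{
	printf("%#x\n", EDP_PSR_ERROR(TRANSCODER_EDP));	/* 0x4 */
	printf("%#x\n", EDP_PSR_ERROR(TRANSCODER_A));	/* 0x400 */
	return 0;
}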
 						 0 : ((trans) - TRANSCODER_A + 1) * 8)
-#define EDP_PSR_TRANS_MASK(trans)	(0x7 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_ERROR(trans)		(0x4 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_POST_EXIT(trans)	(0x2 << _EDP_PSR_TRANS_SHIFT(trans))
-#define EDP_PSR_PRE_ENTRY(trans)	(0x1 << _EDP_PSR_TRANS_SHIFT(trans))
+#define TGL_PSR_MASK			REG_GENMASK(2, 0)
+#define TGL_PSR_ERROR			REG_BIT(2)
+#define TGL_PSR_POST_EXIT		REG_BIT(1)
+#define TGL_PSR_PRE_ENTRY		REG_BIT(0)
+#define EDP_PSR_MASK(trans)		(TGL_PSR_MASK <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_ERROR(trans)		(TGL_PSR_ERROR <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_POST_EXIT(trans)	(TGL_PSR_POST_EXIT <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
+#define EDP_PSR_PRE_ENTRY(trans)	(TGL_PSR_PRE_ENTRY <<		\
+					 _EDP_PSR_TRANS_SHIFT(trans))
 
 #define _SRD_AUX_DATA_A			0x60814
 #define _SRD_AUX_DATA_EDP		0x6f814
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index c4e932368b37..39da0fb0d6d2 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -135,7 +135,7 @@ static int __run_selftests(const char *name,
 	int err = 0;
 
 	while (!i915_selftest.random_seed)
-		i915_selftest.random_seed = get_random_int();
+		i915_selftest.random_seed = get_random_u32();
 
 	i915_selftest.timeout_jiffies =
 		i915_selftest.timeout_ms ?