Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 477
1 files changed, 233 insertions, 244 deletions
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 02084652fe3d..5af16ca4dabd 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -50,11 +50,16 @@
 #include "i915_drv.h"
 #include "i915_fixed.h"
 #include "i915_irq.h"
+#include "intel_mchbar_regs.h"
 #include "intel_pcode.h"
 #include "intel_pm.h"
 #include "vlv_sideband.h"
 #include "../../../platform/x86/intel_ips.h"
 
+struct drm_i915_clock_gating_funcs {
+	void (*init_clock_gating)(struct drm_i915_private *i915);
+};
+
 /* Stores plane specific WM parameters */
 struct skl_wm_params {
 	bool x_tiled, y_tiled;
@@ -2942,27 +2947,27 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
 	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		u64 sskpd = intel_uncore_read64(uncore, MCH_SSKPD);
 
-		wm[0] = (sskpd >> 56) & 0xFF;
+		wm[0] = REG_FIELD_GET64(SSKPD_NEW_WM0_MASK_HSW, sskpd);
 		if (wm[0] == 0)
-			wm[0] = sskpd & 0xF;
-		wm[1] = (sskpd >> 4) & 0xFF;
-		wm[2] = (sskpd >> 12) & 0xFF;
-		wm[3] = (sskpd >> 20) & 0x1FF;
-		wm[4] = (sskpd >> 32) & 0x1FF;
+			wm[0] = REG_FIELD_GET64(SSKPD_OLD_WM0_MASK_HSW, sskpd);
+		wm[1] = REG_FIELD_GET64(SSKPD_WM1_MASK_HSW, sskpd);
+		wm[2] = REG_FIELD_GET64(SSKPD_WM2_MASK_HSW, sskpd);
+		wm[3] = REG_FIELD_GET64(SSKPD_WM3_MASK_HSW, sskpd);
+		wm[4] = REG_FIELD_GET64(SSKPD_WM4_MASK_HSW, sskpd);
 	} else if (DISPLAY_VER(dev_priv) >= 6) {
 		u32 sskpd = intel_uncore_read(uncore, MCH_SSKPD);
 
-		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
-		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
-		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
-		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+		wm[0] = REG_FIELD_GET(SSKPD_WM0_MASK_SNB, sskpd);
+		wm[1] = REG_FIELD_GET(SSKPD_WM1_MASK_SNB, sskpd);
+		wm[2] = REG_FIELD_GET(SSKPD_WM2_MASK_SNB, sskpd);
+		wm[3] = REG_FIELD_GET(SSKPD_WM3_MASK_SNB, sskpd);
 	} else if (DISPLAY_VER(dev_priv) >= 5) {
 		u32 mltr = intel_uncore_read(uncore, MLTR_ILK);
 
 		/* ILK primary LP0 latency is 700 ns */
 		wm[0] = 7;
-		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
-		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+		wm[1] = REG_FIELD_GET(MLTR_WM1_MASK, mltr);
+		wm[2] = REG_FIELD_GET(MLTR_WM2_MASK, mltr);
 	} else {
 		MISSING_CASE(INTEL_DEVID(dev_priv));
 	}
@@ -3175,12 +3180,8 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
 	}
 
 	pipe_wm->pipe_enabled = crtc_state->hw.active;
-	if (sprstate) {
-		pipe_wm->sprites_enabled = sprstate->uapi.visible;
-		pipe_wm->sprites_scaled = sprstate->uapi.visible &&
-			(drm_rect_width(&sprstate->uapi.dst) != drm_rect_width(&sprstate->uapi.src) >> 16 ||
-			 drm_rect_height(&sprstate->uapi.dst) != drm_rect_height(&sprstate->uapi.src) >> 16);
-	}
+	pipe_wm->sprites_enabled = crtc_state->active_planes & BIT(PLANE_SPRITE0);
+	pipe_wm->sprites_scaled = crtc_state->scaled_planes & BIT(PLANE_SPRITE0);
 
 	usable_level = max_level;
 
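The @@ -2942 hunk above replaces open-coded shift-and-mask pairs with REG_FIELD_GET()/REG_FIELD_GET64() on single mask macros. A minimal standalone sketch of the extraction semantics, assuming contiguous masks (field_get64() is a hypothetical stand-in for the FIELD_GET() family from <linux/bitfield.h>):

    #include <stdint.h>

    /* Extract a register field: mask the value, then shift it down by
     * the position of the mask's lowest set bit. */
    static inline uint64_t field_get64(uint64_t mask, uint64_t value)
    {
        return (value & mask) >> __builtin_ctzll(mask);
    }

    /* e.g. a mask covering bits 11:4 reproduces the old open-coded form:
     * field_get64(0xff0, sskpd) == (sskpd >> 4) & 0xff */

The mask macro alone thus encodes both the old *_SHIFT and *_MASK constants, which is why each pair collapses to one definition.
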
@@ -3409,29 +3410,28 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
 		 * disabled. Doing otherwise could cause underruns.
 		 */
 		results->wm_lp[wm_lp - 1] =
-			(ilk_wm_lp_latency(dev_priv, level) << WM1_LP_LATENCY_SHIFT) |
-			(r->pri_val << WM1_LP_SR_SHIFT) |
-			r->cur_val;
+			WM_LP_LATENCY(ilk_wm_lp_latency(dev_priv, level)) |
+			WM_LP_PRIMARY(r->pri_val) |
+			WM_LP_CURSOR(r->cur_val);
 
 		if (r->enable)
-			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
+			results->wm_lp[wm_lp - 1] |= WM_LP_ENABLE;
 
 		if (DISPLAY_VER(dev_priv) >= 8)
-			results->wm_lp[wm_lp - 1] |=
-				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
+			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_BDW(r->fbc_val);
 		else
-			results->wm_lp[wm_lp - 1] |=
-				r->fbc_val << WM1_LP_FBC_SHIFT;
+			results->wm_lp[wm_lp - 1] |= WM_LP_FBC_ILK(r->fbc_val);
+
+		results->wm_lp_spr[wm_lp - 1] = WM_LP_SPRITE(r->spr_val);
 
 		/*
-		 * Always set WM1S_LP_EN when spr_val != 0, even if the
+		 * Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
 		 * level is disabled. Doing otherwise could cause underruns.
 		 */
 		if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
 			drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
-			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
-		} else
-			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
+			results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
+		}
 	}
 
 	/* LP0 register values */
@@ -3444,9 +3444,9 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
 			continue;
 
 		results->wm_pipe[pipe] =
-			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
-			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
-			r->cur_val;
+			WM0_PIPE_PRIMARY(r->pri_val) |
+			WM0_PIPE_SPRITE(r->spr_val) |
+			WM0_PIPE_CURSOR(r->cur_val);
 	}
 }
 
@@ -3538,24 +3538,24 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
 	bool changed = false;
 
-	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
-		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
+	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM_LP_ENABLE) {
+		previous->wm_lp[2] &= ~WM_LP_ENABLE;
 		intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]);
 		changed = true;
 	}
-	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
-		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
+	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM_LP_ENABLE) {
+		previous->wm_lp[1] &= ~WM_LP_ENABLE;
 		intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]);
 		changed = true;
 	}
-	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
-		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
+	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM_LP_ENABLE) {
+		previous->wm_lp[0] &= ~WM_LP_ENABLE;
 		intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]);
 		changed = true;
 	}
 
 	/*
-	 * Don't touch WM1S_LP_EN here.
+	 * Don't touch WM_LP_SPRITE_ENABLE here.
 	 * Doing so could cause underruns.
 	 */
 
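The WM_LP_*() and WM0_PIPE_*() macros in the two hunks above are the packing counterpart of the extraction change: they replace hand-written shifts against *_SHIFT constants. A sketch of the REG_FIELD_PREP()-style packing they rely on (field_prep() and the mask names in the comment are hypothetical stand-ins):

    #include <stdint.h>

    /* Pack a value into a register field: shift it up to the mask's
     * lowest set bit and clamp it to the mask, as FIELD_PREP() does. */
    static inline uint32_t field_prep(uint32_t mask, uint32_t value)
    {
        return (value << __builtin_ctz(mask)) & mask;
    }

    /* Combining several fields then mirrors the new code:
     * reg = field_prep(LATENCY_MASK, lat) | field_prep(PRIMARY_MASK, pri); */
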
@@ -3781,48 +3781,55 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+static void skl_sagv_pre_plane_update(struct intel_atomic_state *state)
 {
-	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	const struct intel_bw_state *new_bw_state;
-	const struct intel_bw_state *old_bw_state;
-	u32 new_mask = 0;
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_bw_state *new_bw_state =
+		intel_atomic_get_new_bw_state(state);
 
-	/*
-	 * Just return if we can't control SAGV or don't have it.
-	 * This is different from situation when we have SAGV but just can't
-	 * afford it due to DBuf limitation - in case if SAGV is completely
-	 * disabled in a BIOS, we are not even allowed to send a PCode request,
-	 * as it will throw an error. So have to check it here.
-	 */
-	if (!intel_has_sagv(dev_priv))
+	if (!new_bw_state)
 		return;
 
-	new_bw_state = intel_atomic_get_new_bw_state(state);
+	if (!intel_can_enable_sagv(i915, new_bw_state))
+		intel_disable_sagv(i915);
+}
+
+static void skl_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	const struct intel_bw_state *new_bw_state =
+		intel_atomic_get_new_bw_state(state);
+
 	if (!new_bw_state)
 		return;
 
-	if (DISPLAY_VER(dev_priv) < 11 && !intel_can_enable_sagv(dev_priv, new_bw_state)) {
-		intel_disable_sagv(dev_priv);
-		return;
-	}
+	if (intel_can_enable_sagv(i915, new_bw_state))
+		intel_enable_sagv(i915);
+}
 
-	old_bw_state = intel_atomic_get_old_bw_state(state);
-	/*
-	 * Nothing to mask
-	 */
-	if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+static void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	const struct intel_bw_state *old_bw_state =
+		intel_atomic_get_old_bw_state(state);
+	const struct intel_bw_state *new_bw_state =
+		intel_atomic_get_new_bw_state(state);
+	u16 old_mask, new_mask;
+
+	if (!new_bw_state)
 		return;
 
+	old_mask = old_bw_state->qgv_points_mask;
 	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
 
-	/*
-	 * If new mask is zero - means there is nothing to mask,
-	 * we can only unmask, which should be done in unmask.
-	 */
-	if (!new_mask)
+	if (old_mask == new_mask)
 		return;
 
+	WARN_ON(!new_bw_state->base.changed);
+
+	drm_dbg_kms(&dev_priv->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
+		    old_mask, new_mask);
+
 	/*
 	 * Restrict required qgv points before updating the configuration.
 	 * According to BSpec we can't mask and unmask qgv points at the same
@@ -3832,12 +3839,41 @@ void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
 	icl_pcode_restrict_qgv_points(dev_priv, new_mask);
 }
 
-void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+static void icl_sagv_post_plane_update(struct intel_atomic_state *state)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-	const struct intel_bw_state *new_bw_state;
-	const struct intel_bw_state *old_bw_state;
-	u32 new_mask = 0;
+	const struct intel_bw_state *old_bw_state =
+		intel_atomic_get_old_bw_state(state);
+	const struct intel_bw_state *new_bw_state =
+		intel_atomic_get_new_bw_state(state);
+	u16 old_mask, new_mask;
+
+	if (!new_bw_state)
+		return;
+
+	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
+	new_mask = new_bw_state->qgv_points_mask;
+
+	if (old_mask == new_mask)
+		return;
+
+	WARN_ON(!new_bw_state->base.changed);
+
+	drm_dbg_kms(&dev_priv->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
+		    old_mask, new_mask);
+
+	/*
+	 * Allow required qgv points after updating the configuration.
+	 * According to BSpec we can't mask and unmask qgv points at the same
+	 * time. Also masking should be done before updating the configuration
+	 * and unmasking afterwards.
+	 */
+	icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+}
+
+void intel_sagv_pre_plane_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
 
 	/*
 	 * Just return if we can't control SAGV or don't have it.
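The new icl_sagv_pre/post_plane_update() pair above encodes the BSpec ordering rule from the comments: restrict to the union of the old and new QGV masks before the plane update, then relax to the new mask alone afterwards, so no point still in use is ever unmasked. A standalone sketch of that contract (the struct and helper names are illustrative, not driver API):

    #include <stdint.h>

    struct bw_state_sketch {
        uint16_t qgv_points_mask;
    };

    /* Before the plane update: old | new, so points needed by the old
     * configuration stay restricted while the new ones are added. */
    static uint16_t qgv_pre_mask(const struct bw_state_sketch *old_state,
                                 const struct bw_state_sketch *new_state)
    {
        return old_state->qgv_points_mask | new_state->qgv_points_mask;
    }

    /* After the plane update: only the new configuration's points. */
    static uint16_t qgv_post_mask(const struct bw_state_sketch *new_state)
    {
        return new_state->qgv_points_mask;
    }

Since the pre mask is a superset of both states, masking can never starve the configuration that is still on screen while the update is in flight.
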
@@ -3846,34 +3882,33 @@ void intel_sagv_post_plane_update(struct intel_atomic_state *state)
 	 * disabled in a BIOS, we are not even allowed to send a PCode request,
 	 * as it will throw an error. So have to check it here.
 	 */
-	if (!intel_has_sagv(dev_priv))
+	if (!intel_has_sagv(i915))
 		return;
 
-	new_bw_state = intel_atomic_get_new_bw_state(state);
-	if (!new_bw_state)
-		return;
+	if (DISPLAY_VER(i915) >= 11)
+		icl_sagv_pre_plane_update(state);
+	else
+		skl_sagv_pre_plane_update(state);
+}
 
-	if (DISPLAY_VER(dev_priv) < 11 && intel_can_enable_sagv(dev_priv, new_bw_state)) {
-		intel_enable_sagv(dev_priv);
-		return;
-	}
+void intel_sagv_post_plane_update(struct intel_atomic_state *state)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
 
-	old_bw_state = intel_atomic_get_old_bw_state(state);
 	/*
-	 * Nothing to unmask
+	 * Just return if we can't control SAGV or don't have it.
+	 * This is different from situation when we have SAGV but just can't
+	 * afford it due to DBuf limitation - in case if SAGV is completely
+	 * disabled in a BIOS, we are not even allowed to send a PCode request,
+	 * as it will throw an error. So have to check it here.
 	 */
-	if (new_bw_state->qgv_points_mask == old_bw_state->qgv_points_mask)
+	if (!intel_has_sagv(i915))
 		return;
 
-	new_mask = new_bw_state->qgv_points_mask;
-
-	/*
-	 * Allow required qgv points after updating the configuration.
-	 * According to BSpec we can't mask and unmask qgv points at the same
-	 * time. Also masking should be done before updating the configuration
-	 * and unmasking afterwards.
-	 */
-	icl_pcode_restrict_qgv_points(dev_priv, new_mask);
+	if (DISPLAY_VER(i915) >= 11)
+		icl_sagv_post_plane_update(state);
+	else
+		skl_sagv_post_plane_update(state);
 }
 
 static bool skl_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state)
@@ -4005,6 +4040,17 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
 			return ret;
 	}
 
+	if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
+	    intel_can_enable_sagv(dev_priv, old_bw_state)) {
+		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
+		if (ret)
+			return ret;
+	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
+		ret = intel_atomic_lock_global_state(&new_bw_state->base);
+		if (ret)
+			return ret;
+	}
+
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
 
@@ -4020,20 +4066,18 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
 			intel_can_enable_sagv(dev_priv, new_bw_state);
 	}
 
-	if (intel_can_enable_sagv(dev_priv, new_bw_state) !=
-	    intel_can_enable_sagv(dev_priv, old_bw_state)) {
-		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
-		if (ret)
-			return ret;
-	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
-		ret = intel_atomic_lock_global_state(&new_bw_state->base);
-		if (ret)
-			return ret;
-	}
-
 	return 0;
 }
 
+static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry,
+			      u16 start, u16 end)
+{
+	entry->start = start;
+	entry->end = end;
+
+	return end;
+}
+
 static int intel_dbuf_slice_size(struct drm_i915_private *dev_priv)
 {
 	return INTEL_INFO(dev_priv)->dbuf.size /
@@ -4172,8 +4216,7 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
 	int ret;
 
 	if (new_dbuf_state->weight[pipe] == 0) {
-		new_dbuf_state->ddb[pipe].start = 0;
-		new_dbuf_state->ddb[pipe].end = 0;
+		skl_ddb_entry_init(&new_dbuf_state->ddb[pipe], 0, 0);
 		goto out;
 	}
 
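The new skl_ddb_entry_init() above returns its end argument, which lets callers chain consecutive allocations, as the @@ -5287 hunk later does with iter.start. A standalone sketch of the pattern (ddb_entry_sketch and the helper names are local stand-ins, not the driver's types):

    #include <stdint.h>

    struct ddb_entry_sketch {
        uint16_t start, end;
    };

    static uint16_t ddb_entry_init(struct ddb_entry_sketch *entry,
                                   uint16_t start, uint16_t end)
    {
        entry->start = start;
        entry->end = end;
        return end;
    }

    /* Carve two back-to-back ranges out of one allocation: each call
     * starts exactly where the previous one ended. */
    static void carve(struct ddb_entry_sketch *a, struct ddb_entry_sketch *b)
    {
        uint16_t pos = 0;

        pos = ddb_entry_init(a, pos, pos + 32); /* a spans [0, 32)  */
        pos = ddb_entry_init(b, pos, pos + 64); /* b spans [32, 96) */
    }

Returning end rather than void removes the start/start+=/end bookkeeping triplets that the later hunks delete.
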
@@ -4189,8 +4232,10 @@ skl_crtc_allocate_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc)
 	start = ddb_range_size * weight_start / weight_total;
 	end = ddb_range_size * weight_end / weight_total;
 
-	new_dbuf_state->ddb[pipe].start = ddb_slices.start - mbus_offset + start;
-	new_dbuf_state->ddb[pipe].end = ddb_slices.start - mbus_offset + end;
+	skl_ddb_entry_init(&new_dbuf_state->ddb[pipe],
+			   ddb_slices.start - mbus_offset + start,
+			   ddb_slices.start - mbus_offset + end);
+
 out:
 	if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe] &&
 	    skl_ddb_entry_equal(&old_dbuf_state->ddb[pipe],
@@ -4268,11 +4313,11 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
 	return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
 }
 
-static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
-				       struct skl_ddb_entry *entry, u32 reg)
+static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
 {
-	entry->start = REG_FIELD_GET(PLANE_BUF_START_MASK, reg);
-	entry->end = REG_FIELD_GET(PLANE_BUF_END_MASK, reg);
+	skl_ddb_entry_init(entry,
+			   REG_FIELD_GET(PLANE_BUF_START_MASK, reg),
+			   REG_FIELD_GET(PLANE_BUF_END_MASK, reg));
 	if (entry->end)
 		entry->end++;
 }
@@ -4290,7 +4335,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 	/* Cursor doesn't support NV12/planar, so no extra calculation needed */
 	if (plane_id == PLANE_CURSOR) {
 		val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(ddb_y, val);
 		return;
 	}
 
@@ -4304,7 +4349,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 	if (DISPLAY_VER(dev_priv) >= 11) {
 		val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
+		skl_ddb_entry_init_from_hw(ddb_y, val);
 	} else {
 		val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id));
 		val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
@@ -4313,8 +4358,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
 		    drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc)))
 			swap(val, val2);
 
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
-		skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
+		skl_ddb_entry_init_from_hw(ddb_y, val);
+		skl_ddb_entry_init_from_hw(ddb_uv, val2);
 	}
 }
 
@@ -4342,55 +4387,6 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
 	intel_display_power_put(dev_priv, power_domain, wakeref);
 }
 
-/*
- * Determines the downscale amount of a plane for the purposes of watermark calculations.
- * The bspec defines downscale amount as:
- *
- * """
- * Horizontal down scale amount = maximum[1, Horizontal source size /
- *                                           Horizontal destination size]
- * Vertical down scale amount = maximum[1, Vertical source size /
- *                                         Vertical destination size]
- * Total down scale amount = Horizontal down scale amount *
- *                           Vertical down scale amount
- * """
- *
- * Return value is provided in 16.16 fixed point form to retain fractional part.
- * Caller should take care of dividing & rounding off the value.
- */
-static uint_fixed_16_16_t
-skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
-			   const struct intel_plane_state *plane_state)
-{
-	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-	u32 src_w, src_h, dst_w, dst_h;
-	uint_fixed_16_16_t fp_w_ratio, fp_h_ratio;
-	uint_fixed_16_16_t downscale_h, downscale_w;
-
-	if (drm_WARN_ON(&dev_priv->drm,
-			!intel_wm_plane_visible(crtc_state, plane_state)))
-		return u32_to_fixed16(0);
-
-	/*
-	 * Src coordinates are already rotated by 270 degrees for
-	 * the 90/270 degree plane rotation cases (to match the
-	 * GTT mapping), hence no need to account for rotation here.
-	 *
-	 * n.b., src is 16.16 fixed point, dst is whole integer.
-	 */
-	src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
-	src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
-	dst_w = drm_rect_width(&plane_state->uapi.dst);
-	dst_h = drm_rect_height(&plane_state->uapi.dst);
-
-	fp_w_ratio = div_fixed16(src_w, dst_w);
-	fp_h_ratio = div_fixed16(src_h, dst_h);
-	downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1));
-	downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1));
-
-	return mul_fixed16(downscale_w, downscale_h);
-}
-
 struct dbuf_slice_conf_entry {
 	u8 active_pipes;
 	u8 dbuf_mask[I915_MAX_PIPES];
@@ -4831,7 +4827,7 @@ static bool check_mbus_joined(u8 active_pipes,
 {
 	int i;
 
-	for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
 		if (dbuf_slices[i].active_pipes == active_pipes)
 			return dbuf_slices[i].join_mbus;
 	}
@@ -4848,7 +4844,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
 {
 	int i;
 
-	for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+	for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
 		if (dbuf_slices[i].active_pipes == active_pipes &&
 		    dbuf_slices[i].join_mbus == join_mbus)
 			return dbuf_slices[i].dbuf_mask[pipe];
@@ -4946,10 +4942,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
 {
 	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	u32 data_rate;
-	u32 width = 0, height = 0;
-	uint_fixed_16_16_t down_scale_amount;
-	u64 rate;
+	int width, height;
 
 	if (!plane_state->uapi.visible)
 		return 0;
@@ -4983,14 +4976,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
 		height /= 2;
 	}
 
-	data_rate = width * height;
-
-	down_scale_amount = skl_plane_downscale_amount(crtc_state, plane_state);
-
-	rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount);
-
-	rate *= fb->format->cpp[color_plane];
-	return rate;
+	return width * height * fb->format->cpp[color_plane];
 }
 
 static u64
@@ -5147,9 +5133,31 @@ static bool icl_need_wm1_wa(struct drm_i915_private *i915,
 	       (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR);
 }
 
+struct skl_plane_ddb_iter {
+	u64 data_rate;
+	u16 total[I915_MAX_PLANES];
+	u16 uv_total[I915_MAX_PLANES];
+	u16 start, size;
+};
+
+static u16
+skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter,
+		       const struct skl_wm_level *wm,
+		       u64 data_rate)
+{
+	u16 extra;
+
+	extra = min_t(u16, iter->size,
+		      DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate));
+	iter->size -= extra;
+	iter->data_rate -= data_rate;
+
+	return wm->min_ddb_alloc + extra;
+}
+
 static int
-skl_allocate_plane_ddb(struct intel_atomic_state *state,
-		       struct intel_crtc *crtc)
+skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
+			    struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	struct intel_crtc_state *crtc_state =
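The new skl_allocate_plane_ddb() helper above distributes the leftover DDB blocks proportionally to each plane's data rate, shrinking both the remaining size and the remaining rate as it goes, so the final plane absorbs any rounding slack. A standalone sketch of the arithmetic (alloc_extra() is illustrative; DIV64_U64_ROUND_UP() is modelled as explicit round-up division, and the caller must keep *data_rate non-zero, as the driver's loop does by breaking early):

    #include <stdint.h>

    static uint16_t alloc_extra(uint16_t *size, uint64_t *data_rate,
                                uint16_t min_alloc, uint64_t rate)
    {
        /* this plane's proportional share of the remaining blocks,
         * rounded up, but never more than what is actually left */
        uint64_t share = (*size * rate + *data_rate - 1) / *data_rate;
        uint16_t extra = share < *size ? (uint16_t)share : *size;

        *size -= extra;
        *data_rate -= rate;

        return min_alloc + extra;
    }

    /* e.g. size=100, rates {600, 400}: the first plane gets
     * ceil(100*600/1000) = 60 extra blocks, the second gets
     * ceil(40*400/400) = 40 -- everything is handed out exactly,
     * which is what the drm_WARN_ON() below asserts. */
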
@@ -5158,10 +5166,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 		intel_atomic_get_new_dbuf_state(state);
 	const struct skl_ddb_entry *alloc = &dbuf_state->ddb[crtc->pipe];
 	int num_active = hweight8(dbuf_state->active_pipes);
-	u16 alloc_size, start = 0;
-	u16 total[I915_MAX_PLANES] = {};
-	u16 uv_total[I915_MAX_PLANES] = {};
-	u64 total_data_rate;
+	struct skl_plane_ddb_iter iter = {};
 	enum plane_id plane_id;
 	u32 blocks;
 	int level;
@@ -5174,24 +5179,21 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 		return 0;
 
 	if (DISPLAY_VER(dev_priv) >= 11)
-		total_data_rate =
-			icl_get_total_relative_data_rate(state, crtc);
+		iter.data_rate = icl_get_total_relative_data_rate(state, crtc);
 	else
-		total_data_rate =
-			skl_get_total_relative_data_rate(state, crtc);
+		iter.data_rate = skl_get_total_relative_data_rate(state, crtc);
 
-	alloc_size = skl_ddb_entry_size(alloc);
-	if (alloc_size == 0)
+	iter.size = skl_ddb_entry_size(alloc);
+	if (iter.size == 0)
 		return 0;
 
 	/* Allocate fixed number of blocks for cursor. */
-	total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
-	alloc_size -= total[PLANE_CURSOR];
-	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
-		alloc->end - total[PLANE_CURSOR];
-	crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
+	iter.total[PLANE_CURSOR] = skl_cursor_allocation(crtc_state, num_active);
+	iter.size -= iter.total[PLANE_CURSOR];
+	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR],
+			   alloc->end - iter.total[PLANE_CURSOR], alloc->end);
 
-	if (total_data_rate == 0)
+	if (iter.data_rate == 0)
 		return 0;
 
 	/*
@@ -5205,7 +5207,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 			&crtc_state->wm.skl.optimal.planes[plane_id];
 
 		if (plane_id == PLANE_CURSOR) {
-			if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
+			if (wm->wm[level].min_ddb_alloc > iter.total[PLANE_CURSOR]) {
 				drm_WARN_ON(&dev_priv->drm,
 					    wm->wm[level].min_ddb_alloc != U16_MAX);
 				blocks = U32_MAX;
@@ -5218,8 +5220,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 			blocks += wm->uv_wm[level].min_ddb_alloc;
 		}
 
-		if (blocks <= alloc_size) {
-			alloc_size -= blocks;
+		if (blocks <= iter.size) {
+			iter.size -= blocks;
 			break;
 		}
 	}
@@ -5228,7 +5230,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 		drm_dbg_kms(&dev_priv->drm,
 			    "Requested display configuration exceeds system DDB limitations");
 		drm_dbg_kms(&dev_priv->drm, "minimum required %d/%d\n",
-			    blocks, alloc_size);
+			    blocks, iter.size);
 		return -EINVAL;
 	}
 
@@ -5240,8 +5242,6 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 	for_each_plane_id_on_crtc(crtc, plane_id) {
 		const struct skl_plane_wm *wm =
			&crtc_state->wm.skl.optimal.planes[plane_id];
-		u64 rate;
-		u16 extra;
 
 		if (plane_id == PLANE_CURSOR)
 			continue;
 
@@ -5250,32 +5250,24 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 		 * We've accounted for all active planes; remaining planes are
 		 * all disabled.
 		 */
-		if (total_data_rate == 0)
+		if (iter.data_rate == 0)
 			break;
 
-		rate = crtc_state->plane_data_rate[plane_id];
-		extra = min_t(u16, alloc_size,
-			      DIV64_U64_ROUND_UP(alloc_size * rate,
-						 total_data_rate));
-		total[plane_id] = wm->wm[level].min_ddb_alloc + extra;
-		alloc_size -= extra;
-		total_data_rate -= rate;
+		iter.total[plane_id] =
+			skl_allocate_plane_ddb(&iter, &wm->wm[level],
+					       crtc_state->plane_data_rate[plane_id]);
 
-		if (total_data_rate == 0)
+		if (iter.data_rate == 0)
 			break;
 
-		rate = crtc_state->uv_plane_data_rate[plane_id];
-		extra = min_t(u16, alloc_size,
-			      DIV64_U64_ROUND_UP(alloc_size * rate,
-						 total_data_rate));
-		uv_total[plane_id] = wm->uv_wm[level].min_ddb_alloc + extra;
-		alloc_size -= extra;
-		total_data_rate -= rate;
+		iter.uv_total[plane_id] =
+			skl_allocate_plane_ddb(&iter, &wm->uv_wm[level],
+					       crtc_state->uv_plane_data_rate[plane_id]);
 	}
 
-	drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
+	drm_WARN_ON(&dev_priv->drm, iter.size != 0 || iter.data_rate != 0);
 
 	/* Set the actual DDB start/end points for each plane */
-	start = alloc->start;
+	iter.start = alloc->start;
 	for_each_plane_id_on_crtc(crtc, plane_id) {
 		struct skl_ddb_entry *plane_alloc =
 			&crtc_state->wm.skl.plane_ddb_y[plane_id];
@@ -5287,20 +5279,16 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 
 		/* Gen11+ uses a separate plane for UV watermarks */
 		drm_WARN_ON(&dev_priv->drm,
-			    DISPLAY_VER(dev_priv) >= 11 && uv_total[plane_id]);
+			    DISPLAY_VER(dev_priv) >= 11 && iter.uv_total[plane_id]);
 
 		/* Leave disabled planes at (0,0) */
-		if (total[plane_id]) {
-			plane_alloc->start = start;
-			start += total[plane_id];
-			plane_alloc->end = start;
-		}
+		if (iter.total[plane_id])
+			iter.start = skl_ddb_entry_init(plane_alloc, iter.start,
+							iter.start + iter.total[plane_id]);
 
-		if (uv_total[plane_id]) {
-			uv_plane_alloc->start = start;
-			start += uv_total[plane_id];
-			uv_plane_alloc->end = start;
-		}
+		if (iter.uv_total[plane_id])
+			iter.start = skl_ddb_entry_init(uv_plane_alloc, iter.start,
+							iter.start + iter.uv_total[plane_id]);
 	}
 
 	/*
@@ -5315,7 +5303,8 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 			&crtc_state->wm.skl.optimal.planes[plane_id];
 
 		skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level],
-					total[plane_id], uv_total[plane_id]);
+					iter.total[plane_id],
+					iter.uv_total[plane_id]);
 
 		if (icl_need_wm1_wa(dev_priv, plane_id) &&
 		    level == 1 && wm->wm[0].enable) {
@@ -5334,9 +5323,9 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state,
 		struct skl_plane_wm *wm =
 			&crtc_state->wm.skl.optimal.planes[plane_id];
 
-		skl_check_wm_level(&wm->trans_wm, total[plane_id]);
-		skl_check_wm_level(&wm->sagv.wm0, total[plane_id]);
-		skl_check_wm_level(&wm->sagv.trans_wm, total[plane_id]);
+		skl_check_wm_level(&wm->trans_wm, iter.total[plane_id]);
+		skl_check_wm_level(&wm->sagv.wm0, iter.total[plane_id]);
+		skl_check_wm_level(&wm->sagv.trans_wm, iter.total[plane_id]);
 	}
 
 	return 0;
@@ -6226,7 +6215,7 @@ skl_compute_ddb(struct intel_atomic_state *state)
 
 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
-		ret = skl_allocate_plane_ddb(state, crtc);
+		ret = skl_crtc_allocate_plane_ddb(state, crtc);
 		if (ret)
 			return ret;
 
@@ -6803,9 +6792,9 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 		 * multiple pipes are active.
 		 */
 		active->wm[0].enable = true;
-		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
-		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
-		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
+		active->wm[0].pri_val = REG_FIELD_GET(WM0_PIPE_PRIMARY_MASK, tmp);
+		active->wm[0].spr_val = REG_FIELD_GET(WM0_PIPE_SPRITE_MASK, tmp);
+		active->wm[0].cur_val = REG_FIELD_GET(WM0_PIPE_CURSOR_MASK, tmp);
 	} else {
 		int level, max_level = ilk_wm_max_level(dev_priv);
 
@@ -7229,12 +7218,12 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
  */
static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv)
 {
-	intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN);
-	intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN);
-	intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN);
+	intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM_LP_ENABLE);
+	intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM_LP_ENABLE);
+	intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM_LP_ENABLE);
 
 	/*
-	 * Don't touch WM1S_LP_EN here.
+	 * Don't touch WM_LP_SPRITE_ENABLE here.
 	 * Doing so could cause underruns.
 	 */
 }
@@ -7437,7 +7426,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv)
 	u32 tmp;
 
 	tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD);
-	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
+	if (REG_FIELD_GET(SSKPD_WM0_MASK_SNB, tmp) != 12)
 		drm_dbg_kms(&dev_priv->drm,
 			    "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
 			    tmp);
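The last hunk folds the old MCH_SSKPD_WM0_MASK/MCH_SSKPD_WM0_VAL pair into a single field comparison. Assuming SSKPD_WM0_MASK_SNB covers the same bit-0-anchored field as the old 0x3f mask and that MCH_SSKPD_WM0_VAL was 0xc, the two checks test the same condition:

    #include <assert.h>
    #include <stdint.h>

    /* Equivalence check for the gen6_check_mch_setup() change, assuming
     * the SNB WM0 field sits at shift 0 (the old 0x3f mask). */
    static void check_equivalence(uint32_t tmp)
    {
        uint32_t old_style = tmp & 0x3f;        /* (tmp & MCH_SSKPD_WM0_MASK) */
        uint32_t new_style = (tmp & 0x3f) >> 0; /* REG_FIELD_GET(mask, tmp)   */

        assert((old_style != 0xc) == (new_style != 12));
    }

Because the mask starts at bit 0 the shift is a no-op, so only the literal changes form: 0xc becomes the decimal 12.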