Diffstat (limited to 'drivers/gpu/drm/i915/intel_display.c')
-rw-r--r--   drivers/gpu/drm/i915/intel_display.c   3831
1 file changed, 2292 insertions, 1539 deletions
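
The bulk of this diff mechanically converts display helpers from taking struct drm_device * to struct drm_i915_private *: callers resolve the private pointer once with to_i915(), and the checks inside switch to the dev_priv-based macros (IS_*, HAS_*, INTEL_GEN()) instead of INTEL_INFO(dev)->gen. A minimal standalone sketch of that calling pattern, using simplified stand-ins for the i915 structures and macros (not the real definitions):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the real drm_device / drm_i915_private. */
struct drm_i915_private {
	int gen;
	bool is_cherryview;
};

struct drm_device {
	struct drm_i915_private *dev_private;
};

/* Resolve the driver-private pointer once, at the call boundary. */
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

/* Feature checks take dev_priv directly, as in the converted helpers. */
#define INTEL_GEN(dev_priv)	((dev_priv)->gen)
#define IS_CHERRYVIEW(dev_priv)	((dev_priv)->is_cherryview)

/* New-style helper: no drm_device, no dev->dev_private re-derivation. */
static bool pll_p_is_valid(struct drm_i915_private *dev_priv, int p)
{
	if (IS_CHERRYVIEW(dev_priv))
		return p > 0;

	return INTEL_GEN(dev_priv) >= 4 && p > 1;
}

int main(void)
{
	struct drm_i915_private i915 = { .gen = 9, .is_cherryview = false };
	struct drm_device dev = { .dev_private = &i915 };

	/* Callers that still hold a drm_device convert at the call site. */
	printf("p valid: %d\n", pll_p_is_valid(to_i915(&dev), 2));
	return 0;
}

Passing dev_priv directly avoids the repeated dev->dev_private lookups and lets the "struct drm_device *dev = ..." locals disappear from functions that no longer need them, which is what most of the removed lines in the hunks below amount to.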
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 175595fc3e45..3dc8724df400 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -34,9 +34,9 @@ #include <drm/drm_edid.h> #include <drm/drmP.h> #include "intel_drv.h" +#include "intel_frontbuffer.h" #include <drm/i915_drm.h> #include "i915_drv.h" -#include "i915_gem_dmabuf.h" #include "intel_dsi.h" #include "i915_trace.h" #include <drm/drm_atomic.h> @@ -115,8 +115,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); -static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state); +static void skl_init_scalers(struct drm_i915_private *dev_priv, + struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state); static void skylake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); @@ -599,7 +600,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock) * the given connectors. */ -static bool intel_PLL_is_valid(struct drm_device *dev, +static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, const struct intel_limit *limit, const struct dpll *clock) { @@ -612,12 +613,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev, if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) INTELPllInvalid("m1 out of range\n"); - if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && - !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) + if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && + !IS_CHERRYVIEW(dev_priv) && !IS_BROXTON(dev_priv)) if (clock->m1 <= clock->m2) INTELPllInvalid("m1 <= m2\n"); - if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) { + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + !IS_BROXTON(dev_priv)) { if (clock->p < limit->p.min || limit->p.max < clock->p) INTELPllInvalid("p out of range\n"); if (clock->m < limit->m.min || limit->m.max < clock->m) @@ -697,7 +699,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit, int this_err; i9xx_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(dev, limit, + if (!intel_PLL_is_valid(to_i915(dev), + limit, &clock)) continue; if (match_clock && @@ -752,7 +755,8 @@ pnv_find_best_dpll(const struct intel_limit *limit, int this_err; pnv_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(dev, limit, + if (!intel_PLL_is_valid(to_i915(dev), + limit, &clock)) continue; if (match_clock && @@ -812,7 +816,8 @@ g4x_find_best_dpll(const struct intel_limit *limit, int this_err; i9xx_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(dev, limit, + if (!intel_PLL_is_valid(to_i915(dev), + limit, &clock)) continue; @@ -844,7 +849,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, * For CHV ignore the error and consider only the P value. * Prefer a bigger P value based on HW requirements. 
*/ - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(to_i915(dev))) { *error_ppm = 0; return calculated_clock->p > best_clock->p; @@ -908,7 +913,8 @@ vlv_find_best_dpll(const struct intel_limit *limit, vlv_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(dev, limit, + if (!intel_PLL_is_valid(to_i915(dev), + limit, &clock)) continue; @@ -976,7 +982,7 @@ chv_find_best_dpll(const struct intel_limit *limit, chv_calc_dpll_params(refclk, &clock); - if (!intel_PLL_is_valid(dev, limit, &clock)) + if (!intel_PLL_is_valid(to_i915(dev), limit, &clock)) continue; if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, @@ -1002,10 +1008,8 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, target_clock, refclk, NULL, best_clock); } -bool intel_crtc_active(struct drm_crtc *crtc) +bool intel_crtc_active(struct intel_crtc *crtc) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - /* Be paranoid as we can arrive here with only partial * state retrieved from the hardware during setup. * @@ -1019,27 +1023,25 @@ bool intel_crtc_active(struct drm_crtc *crtc) * crtc->state->active once we have proper CRTC states wired up * for atomic. */ - return intel_crtc->active && crtc->primary->state->fb && - intel_crtc->config->base.adjusted_mode.crtc_clock; + return crtc->active && crtc->base.primary->state->fb && + crtc->config->base.adjusted_mode.crtc_clock; } enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - return intel_crtc->config->cpu_transcoder; + return crtc->config->cpu_transcoder; } -static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) +static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; u32 line_mask; - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) line_mask = DSL_LINEMASK_GEN2; else line_mask = DSL_LINEMASK_GEN3; @@ -1069,12 +1071,11 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) */ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; enum pipe pipe = crtc->pipe; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ @@ -1084,7 +1085,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc) WARN(1, "pipe_off wait timed out\n"); } else { /* Wait for the display line to settle */ - if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) + if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) WARN(1, "pipe_off wait timed out\n"); } } @@ -1186,34 +1187,32 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, onoff(state), onoff(cur_state)); } -void assert_panel_unlocked(struct drm_i915_private *dev_priv, - enum pipe pipe) +void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = &dev_priv->drm; i915_reg_t pp_reg; u32 val; enum pipe panel_pipe = PIPE_A; bool locked = true; - if (WARN_ON(HAS_DDI(dev))) + if (WARN_ON(HAS_DDI(dev_priv))) return; - if (HAS_PCH_SPLIT(dev)) { + if 
(HAS_PCH_SPLIT(dev_priv)) { u32 port_sel; - pp_reg = PCH_PP_CONTROL; - port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; + pp_reg = PP_CONTROL(0); + port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; if (port_sel == PANEL_PORT_SELECT_LVDS && I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) panel_pipe = PIPE_B; /* XXX: else fix for eDP */ - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { /* presumably write lock depends on pipe, not port select */ - pp_reg = VLV_PIPE_PP_CONTROL(pipe); + pp_reg = PP_CONTROL(pipe); panel_pipe = pipe; } else { - pp_reg = PP_CONTROL; + pp_reg = PP_CONTROL(0); if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) panel_pipe = PIPE_B; } @@ -1231,10 +1230,9 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, static void assert_cursor(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) { - struct drm_device *dev = &dev_priv->drm; bool cur_state; - if (IS_845G(dev) || IS_I865G(dev)) + if (IS_845G(dev_priv) || IS_I865G(dev_priv)) cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; else cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; @@ -1293,11 +1291,10 @@ static void assert_plane(struct drm_i915_private *dev_priv, static void assert_planes_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = &dev_priv->drm; int i; /* Primary planes are fixed to pipes on gen4+ */ - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { u32 val = I915_READ(DSPCNTR(pipe)); I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, "plane %c assertion failure, should be disabled but not\n", @@ -1319,29 +1316,28 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, static void assert_sprites_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = &dev_priv->drm; int sprite; - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { for_each_sprite(dev_priv, pipe, sprite) { u32 val = I915_READ(PLANE_CTL(pipe, sprite)); I915_STATE_WARN(val & PLANE_CTL_ENABLE, "plane %d assertion failure, should be off on pipe %c but is still active\n", sprite, pipe_name(pipe)); } - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { for_each_sprite(dev_priv, pipe, sprite) { u32 val = I915_READ(SPCNTR(pipe, sprite)); I915_STATE_WARN(val & SP_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", sprite_name(pipe, sprite), pipe_name(pipe)); } - } else if (INTEL_INFO(dev)->gen >= 7) { + } else if (INTEL_GEN(dev_priv) >= 7) { u32 val = I915_READ(SPRCTL(pipe)); I915_STATE_WARN(val & SPRITE_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", plane_name(pipe), pipe_name(pipe)); - } else if (INTEL_INFO(dev)->gen >= 5) { + } else if (INTEL_GEN(dev_priv) >= 5) { u32 val = I915_READ(DVSCNTR(pipe)); I915_STATE_WARN(val & DVS_ENABLE, "sprite %c assertion failure, should be off on pipe %c but is still active\n", @@ -1595,12 +1591,12 @@ static void chv_enable_pll(struct intel_crtc *crtc, } } -static int intel_num_dvo_pipes(struct drm_device *dev) +static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; int count = 0; - for_each_intel_crtc(dev, crtc) { + for_each_intel_crtc(&dev_priv->drm, crtc) { count += crtc->base.state->active && intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); } @@ -1610,19 +1606,18 @@ static int 
intel_num_dvo_pipes(struct drm_device *dev) static void i9xx_enable_pll(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); i915_reg_t reg = DPLL(crtc->pipe); u32 dpll = crtc->config->dpll_hw_state.dpll; assert_pipe_disabled(dev_priv, crtc->pipe); /* PLL is protected by panel, make sure we can write it */ - if (IS_MOBILE(dev) && !IS_I830(dev)) + if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) assert_panel_unlocked(dev_priv, crtc->pipe); /* Enable DVO 2x clock on both PLLs if necessary */ - if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) { + if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) { /* * It appears to be important that we don't enable this * for the current pipe before otherwise configuring the @@ -1647,7 +1642,7 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) POSTING_READ(reg); udelay(150); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { I915_WRITE(DPLL_MD(crtc->pipe), crtc->config->dpll_hw_state.dpll_md); } else { @@ -1682,14 +1677,13 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) */ static void i9xx_disable_pll(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; /* Disable DVO 2x clock on both PLLs if necessary */ - if (IS_I830(dev) && + if (IS_I830(dev_priv) && intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && - !intel_num_dvo_pipes(dev)) { + !intel_num_dvo_pipes(dev_priv)) { I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); I915_WRITE(DPLL(PIPE_A), @@ -1785,9 +1779,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = &dev_priv->drm; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, + pipe); i915_reg_t reg; uint32_t val, pipeconf_val; @@ -1798,7 +1791,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, assert_fdi_tx_enabled(dev_priv, pipe); assert_fdi_rx_enabled(dev_priv, pipe); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { /* Workaround: Set the timing override bit before enabling the * pch transcoder. */ reg = TRANS_CHICKEN2(pipe); @@ -1876,7 +1869,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_device *dev = &dev_priv->drm; i915_reg_t reg; uint32_t val; @@ -1897,7 +1889,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 50)) DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { /* Workaround: Clear the timing override chicken bit again. 
*/ reg = TRANS_CHICKEN2(pipe); val = I915_READ(reg); @@ -1906,7 +1898,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, } } -static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) +void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) { u32 val; @@ -1925,6 +1917,18 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); } +enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + WARN_ON(!crtc->config->has_pch_encoder); + + if (HAS_PCH_LPT(dev_priv)) + return TRANSCODER_A; + else + return (enum transcoder) crtc->pipe; +} + /** * intel_enable_pipe - enable a pipe, asserting requirements * @crtc: crtc responsible for the pipe @@ -1938,7 +1942,6 @@ static void intel_enable_pipe(struct intel_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; - enum pipe pch_transcoder; i915_reg_t reg; u32 val; @@ -1948,25 +1951,21 @@ static void intel_enable_pipe(struct intel_crtc *crtc) assert_cursor_disabled(dev_priv, pipe); assert_sprites_disabled(dev_priv, pipe); - if (HAS_PCH_LPT(dev_priv)) - pch_transcoder = TRANSCODER_A; - else - pch_transcoder = pipe; - /* * A pipe without a PLL won't actually be able to drive bits from * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check. */ - if (HAS_GMCH_DISPLAY(dev_priv)) + if (HAS_GMCH_DISPLAY(dev_priv)) { if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) assert_dsi_pll_enabled(dev_priv); else assert_pll_enabled(dev_priv, pipe); - else { + } else { if (crtc->config->has_pch_encoder) { /* if driving the PCH, we need FDI enabled */ - assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); + assert_fdi_rx_pll_enabled(dev_priv, + (enum pipe) intel_crtc_pch_transcoder(crtc)); assert_fdi_tx_pll_enabled(dev_priv, (enum pipe) cpu_transcoder); } @@ -2138,7 +2137,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, const struct drm_framebuffer *fb, unsigned int rotation) { - if (intel_rotation_90_or_270(rotation)) { + if (drm_rotation_90_or_270(rotation)) { *view = i915_ggtt_view_rotated; view->params.rotated = to_intel_framebuffer(fb)->rot_info; } else { @@ -2146,33 +2145,6 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, } } -static void -intel_fill_fb_info(struct drm_i915_private *dev_priv, - struct drm_framebuffer *fb) -{ - struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info; - unsigned int tile_size, tile_width, tile_height, cpp; - - tile_size = intel_tile_size(dev_priv); - - cpp = drm_format_plane_cpp(fb->pixel_format, 0); - intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[0], cpp); - - info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp); - info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height); - - if (info->pixel_format == DRM_FORMAT_NV12) { - cpp = drm_format_plane_cpp(fb->pixel_format, 1); - intel_tile_dims(dev_priv, &tile_width, &tile_height, - fb->modifier[1], cpp); - - info->uv_offset = fb->offsets[1]; - info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp); - info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height); - } -} - static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) { if (INTEL_INFO(dev_priv)->gen >= 9) @@ -2205,20 +2177,19 @@ static unsigned int intel_surf_alignment(const struct 
drm_i915_private *dev_priv } } -int -intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, - unsigned int rotation) +struct i915_vma * +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; + struct i915_vma *vma; u32 alignment; - int ret; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); + alignment = intel_surf_alignment(dev_priv, fb->modifier); intel_fill_fb_ggtt_view(&view, fb, rotation); @@ -2239,75 +2210,112 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, */ intel_runtime_pm_get(dev_priv); - ret = i915_gem_object_pin_to_display_plane(obj, alignment, - &view); - if (ret) - goto err_pm; - - /* Install a fence for tiled scan-out. Pre-i965 always needs a - * fence, whereas 965+ only requires a fence if using - * framebuffer compression. For simplicity, we always install - * a fence as the cost is not that onerous. - */ - if (view.type == I915_GGTT_VIEW_NORMAL) { - ret = i915_gem_object_get_fence(obj); - if (ret == -EDEADLK) { - /* - * -EDEADLK means there are no free fences - * no pending flips. - * - * This is propagated to atomic, but it uses - * -EDEADLK to force a locking recovery, so - * change the returned error to -EBUSY. - */ - ret = -EBUSY; - goto err_unpin; - } else if (ret) - goto err_unpin; + vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); + if (IS_ERR(vma)) + goto err; - i915_gem_object_pin_fence(obj); + if (i915_vma_is_map_and_fenceable(vma)) { + /* Install a fence for tiled scan-out. Pre-i965 always needs a + * fence, whereas 965+ only requires a fence if using + * framebuffer compression. For simplicity, we always, when + * possible, install a fence as the cost is not that onerous. + * + * If we fail to fence the tiled scanout, then either the + * modeset will reject the change (which is highly unlikely as + * the affected systems, all but one, do not have unmappable + * space) or we will not be able to enable full powersaving + * techniques (also likely not to apply due to various limits + * FBC and the like impose on the size of the buffer, which + * presumably we violated anyway with this unmappable buffer). + * Anyway, it is presumably better to stumble onwards with + * something and try to run the system in a "less than optimal" + * mode that matches the user configuration. 
+ */ + if (i915_vma_get_fence(vma) == 0) + i915_vma_pin_fence(vma); } +err: intel_runtime_pm_put(dev_priv); - return 0; - -err_unpin: - i915_gem_object_unpin_from_display_plane(obj, &view); -err_pm: - intel_runtime_pm_put(dev_priv); - return ret; + return vma; } void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; + struct i915_vma *vma; WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); intel_fill_fb_ggtt_view(&view, fb, rotation); + vma = i915_gem_object_to_ggtt(obj, &view); - if (view.type == I915_GGTT_VIEW_NORMAL) - i915_gem_object_unpin_fence(obj); + i915_vma_unpin_fence(vma); + i915_gem_object_unpin_from_display_plane(vma); +} - i915_gem_object_unpin_from_display_plane(obj, &view); +static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + if (drm_rotation_90_or_270(rotation)) + return to_intel_framebuffer(fb)->rotated[plane].pitch; + else + return fb->pitches[plane]; +} + +/* + * Convert the x/y offsets into a linear offset. + * Only valid with 0/180 degree rotation, which is fine since linear + * offset is only used with linear buffers on pre-hsw and tiled buffers + * with gen2/3, and 90/270 degree rotations isn't supported on any of them. + */ +u32 intel_fb_xy_to_linear(int x, int y, + const struct intel_plane_state *state, + int plane) +{ + const struct drm_framebuffer *fb = state->base.fb; + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int pitch = fb->pitches[plane]; + + return y * pitch + x * cpp; +} + +/* + * Add the x/y offsets derived from fb->offsets[] to the user + * specified plane src x/y offsets. The resulting x/y offsets + * specify the start of scanout from the beginning of the gtt mapping. + */ +void intel_add_fb_offsets(int *x, int *y, + const struct intel_plane_state *state, + int plane) + +{ + const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); + unsigned int rotation = state->base.rotation; + + if (drm_rotation_90_or_270(rotation)) { + *x += intel_fb->rotated[plane].x; + *y += intel_fb->rotated[plane].y; + } else { + *x += intel_fb->normal[plane].x; + *y += intel_fb->normal[plane].y; + } } /* - * Adjust the tile offset by moving the difference into - * the x/y offsets. - * * Input tile dimensions and pitch must already be * rotated to match x and y, and in pixel units. */ -static u32 intel_adjust_tile_offset(int *x, int *y, - unsigned int tile_width, - unsigned int tile_height, - unsigned int tile_size, - unsigned int pitch_tiles, - u32 old_offset, - u32 new_offset) +static u32 _intel_adjust_tile_offset(int *x, int *y, + unsigned int tile_width, + unsigned int tile_height, + unsigned int tile_size, + unsigned int pitch_tiles, + u32 old_offset, + u32 new_offset) { + unsigned int pitch_pixels = pitch_tiles * tile_width; unsigned int tiles; WARN_ON(old_offset & (tile_size - 1)); @@ -2319,6 +2327,54 @@ static u32 intel_adjust_tile_offset(int *x, int *y, *y += tiles / pitch_tiles * tile_height; *x += tiles % pitch_tiles * tile_width; + /* minimize x in case it got needlessly big */ + *y += *x / pitch_pixels * tile_height; + *x %= pitch_pixels; + + return new_offset; +} + +/* + * Adjust the tile offset by moving the difference into + * the x/y offsets. 
+ */ +static u32 intel_adjust_tile_offset(int *x, int *y, + const struct intel_plane_state *state, int plane, + u32 old_offset, u32 new_offset) +{ + const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); + const struct drm_framebuffer *fb = state->base.fb; + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int rotation = state->base.rotation; + unsigned int pitch = intel_fb_pitch(fb, plane, rotation); + + WARN_ON(new_offset > old_offset); + + if (fb->modifier != DRM_FORMAT_MOD_NONE) { + unsigned int tile_size, tile_width, tile_height; + unsigned int pitch_tiles; + + tile_size = intel_tile_size(dev_priv); + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier, cpp); + + if (drm_rotation_90_or_270(rotation)) { + pitch_tiles = pitch / tile_height; + swap(tile_width, tile_height); + } else { + pitch_tiles = pitch / (tile_width * cpp); + } + + _intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + old_offset, new_offset); + } else { + old_offset += *y * pitch + *x * cpp; + + *y = (old_offset - new_offset) / pitch; + *x = ((old_offset - new_offset) - *y * pitch) / cpp; + } + return new_offset; } @@ -2329,18 +2385,24 @@ static u32 intel_adjust_tile_offset(int *x, int *y, * In the 90/270 rotated case, x and y are assumed * to be already rotated to match the rotated GTT view, and * pitch is the tile_height aligned framebuffer height. + * + * This function is used when computing the derived information + * under intel_framebuffer, so using any of that information + * here is not allowed. Anything under drm_framebuffer can be + * used. This is why the user has to pass in the pitch since it + * is specified in the rotated orientation. */ -u32 intel_compute_tile_offset(int *x, int *y, - const struct drm_framebuffer *fb, int plane, - unsigned int pitch, - unsigned int rotation) +static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, + int *x, int *y, + const struct drm_framebuffer *fb, int plane, + unsigned int pitch, + unsigned int rotation, + u32 alignment) { - const struct drm_i915_private *dev_priv = to_i915(fb->dev); - uint64_t fb_modifier = fb->modifier[plane]; + uint64_t fb_modifier = fb->modifier; unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); - u32 offset, offset_aligned, alignment; + u32 offset, offset_aligned; - alignment = intel_surf_alignment(dev_priv, fb_modifier); if (alignment) alignment--; @@ -2352,7 +2414,7 @@ u32 intel_compute_tile_offset(int *x, int *y, intel_tile_dims(dev_priv, &tile_width, &tile_height, fb_modifier, cpp); - if (intel_rotation_90_or_270(rotation)) { + if (drm_rotation_90_or_270(rotation)) { pitch_tiles = pitch / tile_height; swap(tile_width, tile_height); } else { @@ -2368,9 +2430,9 @@ u32 intel_compute_tile_offset(int *x, int *y, offset = (tile_rows * pitch_tiles + tiles) * tile_size; offset_aligned = offset & ~alignment; - intel_adjust_tile_offset(x, y, tile_width, tile_height, - tile_size, pitch_tiles, - offset, offset_aligned); + _intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + offset, offset_aligned); } else { offset = *y * pitch + *x * cpp; offset_aligned = offset & ~alignment; @@ -2382,6 +2444,177 @@ u32 intel_compute_tile_offset(int *x, int *y, return offset_aligned; } +u32 intel_compute_tile_offset(int *x, int *y, + const struct intel_plane_state *state, + int plane) +{ + const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); + const struct drm_framebuffer *fb = 
state->base.fb; + unsigned int rotation = state->base.rotation; + int pitch = intel_fb_pitch(fb, plane, rotation); + u32 alignment; + + /* AUX_DIST needs only 4K alignment */ + if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1) + alignment = 4096; + else + alignment = intel_surf_alignment(dev_priv, fb->modifier); + + return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, + rotation, alignment); +} + +/* Convert the fb->offset[] linear offset into x/y offsets */ +static void intel_fb_offset_to_xy(int *x, int *y, + const struct drm_framebuffer *fb, int plane) +{ + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int pitch = fb->pitches[plane]; + u32 linear_offset = fb->offsets[plane]; + + *y = linear_offset / pitch; + *x = linear_offset % pitch / cpp; +} + +static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) +{ + switch (fb_modifier) { + case I915_FORMAT_MOD_X_TILED: + return I915_TILING_X; + case I915_FORMAT_MOD_Y_TILED: + return I915_TILING_Y; + default: + return I915_TILING_NONE; + } +} + +static int +intel_fill_fb_info(struct drm_i915_private *dev_priv, + struct drm_framebuffer *fb) +{ + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct intel_rotation_info *rot_info = &intel_fb->rot_info; + u32 gtt_offset_rotated = 0; + unsigned int max_size = 0; + uint32_t format = fb->pixel_format; + int i, num_planes = drm_format_num_planes(format); + unsigned int tile_size = intel_tile_size(dev_priv); + + for (i = 0; i < num_planes; i++) { + unsigned int width, height; + unsigned int cpp, size; + u32 offset; + int x, y; + + cpp = drm_format_plane_cpp(format, i); + width = drm_format_plane_width(fb->width, format, i); + height = drm_format_plane_height(fb->height, format, i); + + intel_fb_offset_to_xy(&x, &y, fb, i); + + /* + * The fence (if used) is aligned to the start of the object + * so having the framebuffer wrap around across the edge of the + * fenced region doesn't really work. We have no API to configure + * the fence start offset within the object (nor could we probably + * on gen2/3). So it's just easier if we just require that the + * fb layout agrees with the fence layout. We already check that the + * fb stride matches the fence stride elsewhere. + */ + if (i915_gem_object_is_tiled(intel_fb->obj) && + (x + width) * cpp > fb->pitches[i]) { + DRM_DEBUG("bad fb plane %d offset: 0x%x\n", + i, fb->offsets[i]); + return -EINVAL; + } + + /* + * First pixel of the framebuffer from + * the start of the normal gtt mapping. + */ + intel_fb->normal[i].x = x; + intel_fb->normal[i].y = y; + + offset = _intel_compute_tile_offset(dev_priv, &x, &y, + fb, 0, fb->pitches[i], + DRM_ROTATE_0, tile_size); + offset /= tile_size; + + if (fb->modifier != DRM_FORMAT_MOD_NONE) { + unsigned int tile_width, tile_height; + unsigned int pitch_tiles; + struct drm_rect r; + + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier, cpp); + + rot_info->plane[i].offset = offset; + rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); + rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); + rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); + + intel_fb->rotated[i].pitch = + rot_info->plane[i].height * tile_height; + + /* how many tiles does this plane need */ + size = rot_info->plane[i].stride * rot_info->plane[i].height; + /* + * If the plane isn't horizontally tile aligned, + * we need one more tile. 
+ */ + if (x != 0) + size++; + + /* rotate the x/y offsets to match the GTT view */ + r.x1 = x; + r.y1 = y; + r.x2 = x + width; + r.y2 = y + height; + drm_rect_rotate(&r, + rot_info->plane[i].width * tile_width, + rot_info->plane[i].height * tile_height, + DRM_ROTATE_270); + x = r.x1; + y = r.y1; + + /* rotate the tile dimensions to match the GTT view */ + pitch_tiles = intel_fb->rotated[i].pitch / tile_height; + swap(tile_width, tile_height); + + /* + * We only keep the x/y offsets, so push all of the + * gtt offset into the x/y offsets. + */ + _intel_adjust_tile_offset(&x, &y, tile_size, + tile_width, tile_height, pitch_tiles, + gtt_offset_rotated * tile_size, 0); + + gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; + + /* + * First pixel of the framebuffer from + * the start of the rotated gtt mapping. + */ + intel_fb->rotated[i].x = x; + intel_fb->rotated[i].y = y; + } else { + size = DIV_ROUND_UP((y + height) * fb->pitches[i] + + x * cpp, tile_size); + } + + /* how many tiles in total needed in the bo */ + max_size = max(max_size, offset + size); + } + + if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) { + DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n", + max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size); + return -EINVAL; + } + + return 0; +} + static int i9xx_format_to_fourcc(int format) { switch (format) { @@ -2465,15 +2698,14 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return false; } - obj->tiling_mode = plane_config->tiling; - if (obj->tiling_mode == I915_TILING_X) - obj->stride = fb->pitches[0]; + if (plane_config->tiling == I915_TILING_X) + obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; mode_cmd.pixel_format = fb->pixel_format; mode_cmd.width = fb->width; mode_cmd.height = fb->height; mode_cmd.pitches[0] = fb->pitches[0]; - mode_cmd.modifier[0] = fb->modifier[0]; + mode_cmd.modifier[0] = fb->modifier; mode_cmd.flags = DRM_MODE_FB_MODIFIERS; if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), @@ -2488,7 +2720,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return true; out_unref_obj: - drm_gem_object_unreference(&obj->base); + i915_gem_object_put(obj); mutex_unlock(&dev->struct_mutex); return false; } @@ -2552,7 +2784,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, continue; obj = intel_fb_obj(fb); - if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { + if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) { drm_framebuffer_reference(fb); goto valid_fb; } @@ -2565,7 +2797,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, * simplest solution is to just disable the primary plane now and * pretend the BIOS never had it enabled. 
*/ - to_intel_plane_state(plane_state)->visible = false; + to_intel_plane_state(plane_state)->base.visible = false; crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); intel_pre_disable_primary_noatomic(&intel_crtc->base); intel_plane->disable_plane(primary, &intel_crtc->base); @@ -2583,49 +2815,205 @@ valid_fb: plane_state->crtc_w = fb->width; plane_state->crtc_h = fb->height; - intel_state->src.x1 = plane_state->src_x; - intel_state->src.y1 = plane_state->src_y; - intel_state->src.x2 = plane_state->src_x + plane_state->src_w; - intel_state->src.y2 = plane_state->src_y + plane_state->src_h; - intel_state->dst.x1 = plane_state->crtc_x; - intel_state->dst.y1 = plane_state->crtc_y; - intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w; - intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h; + intel_state->base.src = drm_plane_state_src(plane_state); + intel_state->base.dst = drm_plane_state_dest(plane_state); obj = intel_fb_obj(fb); - if (obj->tiling_mode != I915_TILING_NONE) + if (i915_gem_object_is_tiled(obj)) dev_priv->preserve_bios_swizzle = true; drm_framebuffer_reference(fb); primary->fb = primary->state->fb = fb; primary->crtc = primary->state->crtc = &intel_crtc->base; intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary)); - obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit; + atomic_or(to_intel_plane(primary)->frontbuffer_bit, + &obj->frontbuffer_bits); +} + +static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + + switch (fb->modifier) { + case DRM_FORMAT_MOD_NONE: + case I915_FORMAT_MOD_X_TILED: + switch (cpp) { + case 8: + return 4096; + case 4: + case 2: + case 1: + return 8192; + default: + MISSING_CASE(cpp); + break; + } + break; + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Yf_TILED: + switch (cpp) { + case 8: + return 2048; + case 4: + return 4096; + case 2: + case 1: + return 8192; + default: + MISSING_CASE(cpp); + break; + } + break; + default: + MISSING_CASE(fb->modifier); + } + + return 2048; +} + +static int skl_check_main_surface(struct intel_plane_state *plane_state) +{ + const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int x = plane_state->base.src.x1 >> 16; + int y = plane_state->base.src.y1 >> 16; + int w = drm_rect_width(&plane_state->base.src) >> 16; + int h = drm_rect_height(&plane_state->base.src) >> 16; + int max_width = skl_max_plane_width(fb, 0, rotation); + int max_height = 4096; + u32 alignment, offset, aux_offset = plane_state->aux.offset; + + if (w > max_width || h > max_height) { + DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); + return -EINVAL; + } + + intel_add_fb_offsets(&x, &y, plane_state, 0); + offset = intel_compute_tile_offset(&x, &y, plane_state, 0); + + alignment = intel_surf_alignment(dev_priv, fb->modifier); + + /* + * AUX surface offset is specified as the distance from the + * main surface offset, and it must be non-negative. Make + * sure that is what we will get. + */ + if (offset > aux_offset) + offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, + offset, aux_offset & ~(alignment - 1)); + + /* + * When using an X-tiled surface, the plane blows up + * if the x offset + width exceed the stride. 
+ * + * TODO: linear and Y-tiled seem fine, Yf untested, + */ + if (fb->modifier == I915_FORMAT_MOD_X_TILED) { + int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + + while ((x + w) * cpp > fb->pitches[0]) { + if (offset == 0) { + DRM_DEBUG_KMS("Unable to find suitable display surface offset\n"); + return -EINVAL; + } + + offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, + offset, offset - alignment); + } + } + + plane_state->main.offset = offset; + plane_state->main.x = x; + plane_state->main.y = y; + + return 0; +} + +static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int max_width = skl_max_plane_width(fb, 1, rotation); + int max_height = 4096; + int x = plane_state->base.src.x1 >> 17; + int y = plane_state->base.src.y1 >> 17; + int w = drm_rect_width(&plane_state->base.src) >> 17; + int h = drm_rect_height(&plane_state->base.src) >> 17; + u32 offset; + + intel_add_fb_offsets(&x, &y, plane_state, 1); + offset = intel_compute_tile_offset(&x, &y, plane_state, 1); + + /* FIXME not quite sure how/if these apply to the chroma plane */ + if (w > max_width || h > max_height) { + DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", + w, h, max_width, max_height); + return -EINVAL; + } + + plane_state->aux.offset = offset; + plane_state->aux.x = x; + plane_state->aux.y = y; + + return 0; +} + +int skl_check_plane_surface(struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int rotation = plane_state->base.rotation; + int ret; + + /* Rotate src coordinates to match rotated GTT view */ + if (drm_rotation_90_or_270(rotation)) + drm_rect_rotate(&plane_state->base.src, + fb->width << 16, fb->height << 16, + DRM_ROTATE_270); + + /* + * Handle the AUX surface first since + * the main surface setup depends on it. 
+ */ + if (fb->pixel_format == DRM_FORMAT_NV12) { + ret = skl_check_nv12_aux_surface(plane_state); + if (ret) + return ret; + } else { + plane_state->aux.offset = ~0xfff; + plane_state->aux.x = 0; + plane_state->aux.y = 0; + } + + ret = skl_check_main_surface(plane_state); + if (ret) + return ret; + + return 0; } static void i9xx_update_primary_plane(struct drm_plane *primary, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_device *dev = primary->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(primary->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - int x = plane_state->src.x1 >> 16; - int y = plane_state->src.y1 >> 16; + int x = plane_state->base.src.x1 >> 16; + int y = plane_state->base.src.y1 >> 16; dspcntr = DISPPLANE_GAMMA_ENABLE; dspcntr |= DISPLAY_PLANE_ENABLE; - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { if (intel_crtc->pipe == PIPE_B) dspcntr |= DISPPLANE_SEL_PIPE_B; @@ -2636,7 +3024,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, ((crtc_state->pipe_src_h - 1) << 16) | (crtc_state->pipe_src_w - 1)); I915_WRITE(DSPPOS(plane), 0); - } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) { + } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) { I915_WRITE(PRIMSIZE(plane), ((crtc_state->pipe_src_h - 1) << 16) | (crtc_state->pipe_src_w - 1)); @@ -2670,36 +3058,36 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, BUG(); } - if (INTEL_INFO(dev)->gen >= 4 && - obj->tiling_mode != I915_TILING_NONE) + if (INTEL_GEN(dev_priv) >= 4 && + fb->modifier == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; - if (IS_G4X(dev)) + if (rotation & DRM_ROTATE_180) + dspcntr |= DISPPLANE_ROTATE_180; + + if (rotation & DRM_REFLECT_X) + dspcntr |= DISPPLANE_MIRROR; + + if (IS_G4X(dev_priv)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * cpp; + intel_add_fb_offsets(&x, &y, plane_state, 0); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= intel_crtc->dspaddr_offset; - } else { - intel_crtc->dspaddr_offset = linear_offset; - } + intel_compute_tile_offset(&x, &y, plane_state, 0); - if (rotation == BIT(DRM_ROTATE_180)) { - dspcntr |= DISPPLANE_ROTATE_180; + if (rotation & DRM_ROTATE_180) { + x += crtc_state->pipe_src_w - 1; + y += crtc_state->pipe_src_h - 1; + } else if (rotation & DRM_REFLECT_X) { + x += crtc_state->pipe_src_w - 1; + } - x += (crtc_state->pipe_src_w - 1); - y += (crtc_state->pipe_src_h - 1); + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - /* Finding the last pixel of the last line of the display - data and adding to linear_offset*/ - linear_offset += - (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * cpp; - } + if (INTEL_GEN(dev_priv) < 4) + intel_crtc->dspaddr_offset = linear_offset; intel_crtc->adjusted_x = x; intel_crtc->adjusted_y = y; @@ -2707,13 +3095,17 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE(reg, dspcntr); 
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { I915_WRITE(DSPSURF(plane), - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); I915_WRITE(DSPLINOFF(plane), linear_offset); - } else - I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset); + } else { + I915_WRITE(DSPADDR(plane), + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); + } POSTING_READ(reg); } @@ -2741,20 +3133,18 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int plane = intel_crtc->plane; u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); unsigned int rotation = plane_state->base.rotation; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); - int x = plane_state->src.x1 >> 16; - int y = plane_state->src.y1 >> 16; + int x = plane_state->base.src.x1 >> 16; + int y = plane_state->base.src.y1 >> 16; dspcntr = DISPPLANE_GAMMA_ENABLE; dspcntr |= DISPLAY_PLANE_ENABLE; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; switch (fb->pixel_format) { @@ -2780,32 +3170,29 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, BUG(); } - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; - if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) + if (rotation & DRM_ROTATE_180) + dspcntr |= DISPPLANE_ROTATE_180; + + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - linear_offset = y * fb->pitches[0] + x * cpp; - intel_crtc->dspaddr_offset = - intel_compute_tile_offset(&x, &y, fb, 0, - fb->pitches[0], rotation); - linear_offset -= intel_crtc->dspaddr_offset; - if (rotation == BIT(DRM_ROTATE_180)) { - dspcntr |= DISPPLANE_ROTATE_180; + intel_add_fb_offsets(&x, &y, plane_state, 0); - if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { - x += (crtc_state->pipe_src_w - 1); - y += (crtc_state->pipe_src_h - 1); + intel_crtc->dspaddr_offset = + intel_compute_tile_offset(&x, &y, plane_state, 0); - /* Finding the last pixel of the last line of the display - data and adding to linear_offset*/ - linear_offset += - (crtc_state->pipe_src_h - 1) * fb->pitches[0] + - (crtc_state->pipe_src_w - 1) * cpp; - } + /* HSW+ does this automagically in hardware */ + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) && + rotation & DRM_ROTATE_180) { + x += crtc_state->pipe_src_w - 1; + y += crtc_state->pipe_src_h - 1; } + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + intel_crtc->adjusted_x = x; intel_crtc->adjusted_y = y; @@ -2813,8 +3200,9 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSURF(plane), - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + intel_fb_gtt_offset(fb, rotation) + + intel_crtc->dspaddr_offset); + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { I915_WRITE(DSPOFFSET(plane), (y << 16) | x); } else { I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); @@ -2835,32 +3223,21 @@ u32 
intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, } } -u32 intel_plane_obj_offset(struct intel_plane *intel_plane, - struct drm_i915_gem_object *obj, - unsigned int plane) +u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, + unsigned int rotation) { + struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; struct i915_vma *vma; - u64 offset; - intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb, - intel_plane->base.state->rotation); + intel_fill_fb_ggtt_view(&view, fb, rotation); - vma = i915_gem_obj_to_ggtt_view(obj, &view); + vma = i915_gem_object_to_ggtt(obj, &view); if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n", - view.type)) + view.type)) return -1; - offset = vma->node.start; - - if (plane == 1) { - offset += vma->ggtt_view.params.rotated.uv_start_page * - PAGE_SIZE; - } - - WARN_ON(upper_32_bits(offset)); - - return lower_32_bits(offset); + return i915_ggtt_offset(vma); } static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) @@ -2890,6 +3267,28 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc) } } +u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, + unsigned int rotation) +{ + const struct drm_i915_private *dev_priv = to_i915(fb->dev); + u32 stride = intel_fb_pitch(fb, plane, rotation); + + /* + * The stride is either expressed as a multiple of 64 bytes chunks for + * linear buffers or in number of tiles for tiled buffers. + */ + if (drm_rotation_90_or_270(rotation)) { + int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + + stride /= intel_tile_height(dev_priv, fb->modifier, cpp); + } else { + stride /= intel_fb_stride_alignment(dev_priv, fb->modifier, + fb->pixel_format); + } + + return stride; +} + u32 skl_plane_ctl_format(uint32_t pixel_format) { switch (pixel_format) { @@ -2952,17 +3351,17 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier) u32 skl_plane_ctl_rotation(unsigned int rotation) { switch (rotation) { - case BIT(DRM_ROTATE_0): + case DRM_ROTATE_0: break; /* * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr * while i915 HW rotation is clockwise, thats why this swapping. 
*/ - case BIT(DRM_ROTATE_90): + case DRM_ROTATE_90: return PLANE_CTL_ROTATE_270; - case BIT(DRM_ROTATE_180): + case DRM_ROTATE_180: return PLANE_CTL_ROTATE_180; - case BIT(DRM_ROTATE_270): + case DRM_ROTATE_270: return PLANE_CTL_ROTATE_90; default: MISSING_CASE(rotation); @@ -2979,62 +3378,45 @@ static void skylake_update_primary_plane(struct drm_plane *plane, struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_framebuffer *fb = plane_state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_crtc->pipe; - u32 plane_ctl, stride_div, stride; - u32 tile_height, plane_offset, plane_size; + u32 plane_ctl; unsigned int rotation = plane_state->base.rotation; - int x_offset, y_offset; - u32 surf_addr; + u32 stride = skl_plane_stride(fb, 0, rotation); + u32 surf_addr = plane_state->main.offset; int scaler_id = plane_state->scaler_id; - int src_x = plane_state->src.x1 >> 16; - int src_y = plane_state->src.y1 >> 16; - int src_w = drm_rect_width(&plane_state->src) >> 16; - int src_h = drm_rect_height(&plane_state->src) >> 16; - int dst_x = plane_state->dst.x1; - int dst_y = plane_state->dst.y1; - int dst_w = drm_rect_width(&plane_state->dst); - int dst_h = drm_rect_height(&plane_state->dst); + int src_x = plane_state->main.x; + int src_y = plane_state->main.y; + int src_w = drm_rect_width(&plane_state->base.src) >> 16; + int src_h = drm_rect_height(&plane_state->base.src) >> 16; + int dst_x = plane_state->base.dst.x1; + int dst_y = plane_state->base.dst.y1; + int dst_w = drm_rect_width(&plane_state->base.dst); + int dst_h = drm_rect_height(&plane_state->base.dst); plane_ctl = PLANE_CTL_ENABLE | PLANE_CTL_PIPE_GAMMA_ENABLE | PLANE_CTL_PIPE_CSC_ENABLE; plane_ctl |= skl_plane_ctl_format(fb->pixel_format); - plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); + plane_ctl |= skl_plane_ctl_tiling(fb->modifier); plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; plane_ctl |= skl_plane_ctl_rotation(rotation); - stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); - surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0); - - WARN_ON(drm_rect_width(&plane_state->src) == 0); - - if (intel_rotation_90_or_270(rotation)) { - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + /* Sizes are 0 based */ + src_w--; + src_h--; + dst_w--; + dst_h--; - /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp); - stride = DIV_ROUND_UP(fb->height, tile_height); - x_offset = stride * tile_height - src_y - src_h; - y_offset = src_x; - plane_size = (src_w - 1) << 16 | (src_h - 1); - } else { - stride = fb->pitches[0] / stride_div; - x_offset = src_x; - y_offset = src_y; - plane_size = (src_h - 1) << 16 | (src_w - 1); - } - plane_offset = y_offset << 16 | x_offset; + intel_crtc->dspaddr_offset = surf_addr; - intel_crtc->adjusted_x = x_offset; - intel_crtc->adjusted_y = y_offset; + intel_crtc->adjusted_x = src_x; + intel_crtc->adjusted_y = src_y; I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); - I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); - I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); + I915_WRITE(PLANE_OFFSET(pipe, 0), (src_y << 16) | src_x); I915_WRITE(PLANE_STRIDE(pipe, 0), stride); + I915_WRITE(PLANE_SIZE(pipe, 0), (src_h << 16) | src_w); if (scaler_id >= 0) { uint32_t ps_ctrl = 0; @@ -3051,7 +3433,8 @@ static void skylake_update_primary_plane(struct drm_plane *plane, I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); } - 
I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); + I915_WRITE(PLANE_SURF(pipe, 0), + intel_fb_gtt_offset(fb, rotation) + surf_addr); POSTING_READ(PLANE_SURF(pipe, 0)); } @@ -3061,7 +3444,8 @@ static void skylake_disable_primary_plane(struct drm_plane *primary, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = to_intel_crtc(crtc)->pipe; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; I915_WRITE(PLANE_CTL(pipe, 0), 0); I915_WRITE(PLANE_SURF(pipe, 0), 0); @@ -3096,7 +3480,7 @@ static void intel_update_primary_planes(struct drm_device *dev) struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); - if (plane_state->visible) + if (plane_state->base.visible) plane->update_plane(&plane->base, to_intel_crtc_state(crtc->state), plane_state); @@ -3112,7 +3496,7 @@ __intel_display_resume(struct drm_device *dev, int i, ret; intel_modeset_setup_hw_state(dev); - i915_redisable_vga(dev); + i915_redisable_vga(to_i915(dev)); if (!state) return 0; @@ -3135,6 +3519,12 @@ __intel_display_resume(struct drm_device *dev, return ret; } +static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) +{ + return intel_has_gpu_reset(dev_priv) && + INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); +} + void intel_prepare_reset(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; @@ -3142,10 +3532,6 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) struct drm_atomic_state *state; int ret; - /* no reset support for gen2 */ - if (IS_GEN2(dev_priv)) - return; - /* * Need mode_config.mutex so that we don't * trample ongoing ->detect() and whatnot. @@ -3161,7 +3547,8 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) } /* reset doesn't touch the display, but flips might get nuked anyway, */ - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + if (!i915.force_reset_modeset_test && + !gpu_reset_clobbers_display(dev_priv)) return; /* @@ -3187,7 +3574,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv) return; err: - drm_atomic_state_free(state); + drm_atomic_state_put(state); } void intel_finish_reset(struct drm_i915_private *dev_priv) @@ -3204,24 +3591,26 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) */ intel_complete_page_flips(dev_priv); - /* no reset support for gen2 */ - if (IS_GEN2(dev_priv)) - return; - dev_priv->modeset_restore_state = NULL; /* reset doesn't touch the display */ - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { - /* - * Flips in the rings have been nuked by the reset, - * so update the base address of all primary - * planes to the the last fb to make sure we're - * showing the correct fb after a reset. - * - * FIXME: Atomic will make this obsolete since we won't schedule - * CS-based flips (which might get lost in gpu resets) any more. - */ - intel_update_primary_planes(dev); + if (!gpu_reset_clobbers_display(dev_priv)) { + if (!state) { + /* + * Flips in the rings have been nuked by the reset, + * so update the base address of all primary + * planes to the the last fb to make sure we're + * showing the correct fb after a reset. + * + * FIXME: Atomic will make this obsolete since we won't schedule + * CS-based flips (which might get lost in gpu resets) any more. 
+ */ + intel_update_primary_planes(dev); + } else { + ret = __intel_display_resume(dev, state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); + } } else { /* * The display has been reset as well, @@ -3230,6 +3619,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_runtime_pm_disable_interrupts(dev_priv); intel_runtime_pm_enable_interrupts(dev_priv); + intel_pps_unlock_regs_wa(dev_priv); intel_modeset_init_hw(dev); spin_lock_irq(&dev_priv->irq_lock); @@ -3244,20 +3634,33 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) intel_hpd_init(dev_priv); } + if (state) + drm_atomic_state_put(state); drm_modeset_drop_locks(ctx); drm_modeset_acquire_fini(ctx); mutex_unlock(&dev->mode_config.mutex); } +static bool abort_flip_on_reset(struct intel_crtc *crtc) +{ + struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error; + + if (i915_reset_in_progress(error)) + return true; + + if (crtc->reset_count != i915_reset_count(error)) + return true; + + return false; +} + static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - unsigned reset_counter; bool pending; - reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error); - if (intel_crtc->reset_counter != reset_counter) + if (abort_flip_on_reset(intel_crtc)) return false; spin_lock_irq(&dev->event_lock); @@ -3270,8 +3673,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) static void intel_update_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); @@ -3296,12 +3698,12 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, (pipe_config->pipe_src_h - 1)); /* on skylake this is done by detaching scalers */ - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { skl_detach_scalers(crtc); if (pipe_config->pch_pfit.enabled) skylake_pfit_enable(crtc); - } else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev_priv)) { if (pipe_config->pch_pfit.enabled) ironlake_pfit_enable(crtc); else if (old_crtc_state->pch_pfit.enabled) @@ -3321,7 +3723,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) /* enable normal train */ reg = FDI_TX_CTL(pipe); temp = I915_READ(reg); - if (IS_IVYBRIDGE(dev)) { + if (IS_IVYBRIDGE(dev_priv)) { temp &= ~FDI_LINK_TRAIN_NONE_IVB; temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; } else { @@ -3332,7 +3734,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_NORMAL_CPT; } else { @@ -3346,7 +3748,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) udelay(1000); /* IVB wants error correction enabled */ - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE); } @@ -3490,7 +3892,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; } else { @@ -3534,7 +3936,7 @@ static void gen6_fdi_link_train(struct drm_crtc 
*crtc) temp = I915_READ(reg); temp &= ~FDI_LINK_TRAIN_NONE; temp |= FDI_LINK_TRAIN_PATTERN_2; - if (IS_GEN6(dev)) { + if (IS_GEN6(dev_priv)) { temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; /* SNB-B */ temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; @@ -3543,7 +3945,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; } else { @@ -3797,7 +4199,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) udelay(100); /* Ironlake workaround, disable clock pointer after downing FDI */ - if (HAS_PCH_IBX(dev)) + if (HAS_PCH_IBX(dev_priv)) I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); /* still set train pattern 1 */ @@ -3809,7 +4211,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) reg = FDI_RX_CTL(pipe); temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; } else { @@ -3827,6 +4229,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc) bool intel_has_pending_fb_unpin(struct drm_device *dev) { + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *crtc; /* Note that we don't need to be called with mode_config.lock here @@ -3841,7 +4244,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev) continue; if (crtc->flip_work) - intel_wait_for_vblank(dev, crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); return true; } @@ -3900,7 +4303,7 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) return 0; } -static void lpt_disable_iclkip(struct drm_i915_private *dev_priv) +void lpt_disable_iclkip(struct drm_i915_private *dev_priv) { u32 temp; @@ -4132,7 +4535,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) assert_pch_transcoder_disabled(dev_priv, pipe); - if (IS_IVYBRIDGE(dev)) + if (IS_IVYBRIDGE(dev_priv)) ivybridge_update_fdi_bc_bifurcation(intel_crtc); /* Write the TU size bits before fdi link training, so that error @@ -4145,7 +4548,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) /* We need to program the right clock selection before writing the pixel * mutliplier into the DPLL. */ - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { u32 sel; temp = I915_READ(PCH_DPLL_SEL); @@ -4175,7 +4578,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) intel_fdi_normal_train(crtc); /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) { + if (HAS_PCH_CPT(dev_priv) && + intel_crtc_has_dp_encoder(intel_crtc->config)) { const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; @@ -4254,7 +4658,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, to_intel_crtc(crtc_state->base.crtc); int need_scaling; - need_scaling = intel_rotation_90_or_270(rotation) ? + need_scaling = drm_rotation_90_or_270(rotation) ? 
(src_h != dst_w || src_w != dst_h): (src_w != dst_w || src_h != dst_h); @@ -4315,15 +4719,10 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, */ int skl_update_scaler_crtc(struct intel_crtc_state *state) { - struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; - DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n", - intel_crtc->base.base.id, intel_crtc->base.name, - intel_crtc->pipe, SKL_CRTC_INDEX); - return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, - &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), + &state->scaler_state.scaler_id, DRM_ROTATE_0, state->pipe_src_w, state->pipe_src_h, adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); } @@ -4342,26 +4741,21 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); struct drm_framebuffer *fb = plane_state->base.fb; int ret; - bool force_detach = !fb || !plane_state->visible; - - DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n", - intel_plane->base.base.id, intel_plane->base.name, - intel_crtc->pipe, drm_plane_index(&intel_plane->base)); + bool force_detach = !fb || !plane_state->base.visible; ret = skl_update_scaler(crtc_state, force_detach, drm_plane_index(&intel_plane->base), &plane_state->scaler_id, plane_state->base.rotation, - drm_rect_width(&plane_state->src) >> 16, - drm_rect_height(&plane_state->src) >> 16, - drm_rect_width(&plane_state->dst), - drm_rect_height(&plane_state->dst)); + drm_rect_width(&plane_state->base.src) >> 16, + drm_rect_height(&plane_state->base.src) >> 16, + drm_rect_width(&plane_state->base.dst), + drm_rect_height(&plane_state->base.dst)); if (ret || plane_state->scaler_id < 0) return ret; @@ -4445,7 +4839,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc) * as some pre-programmed values are broken, * e.g. x201. */ - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) + if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe)); else @@ -4470,7 +4864,7 @@ void hsw_enable_ips(struct intel_crtc *crtc) */ assert_plane_enabled(dev_priv, crtc->plane); - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev_priv)) { mutex_lock(&dev_priv->rps.hw_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); mutex_unlock(&dev_priv->rps.hw_lock); @@ -4502,7 +4896,7 @@ void hsw_disable_ips(struct intel_crtc *crtc) return; assert_plane_enabled(dev_priv, crtc->plane); - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev_priv)) { mutex_lock(&dev_priv->rps.hw_lock); WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); mutex_unlock(&dev_priv->rps.hw_lock); @@ -4517,7 +4911,7 @@ void hsw_disable_ips(struct intel_crtc *crtc) } /* We need to wait for a vblank before we can disable the plane. */ - intel_wait_for_vblank(dev, crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); } static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) @@ -4571,7 +4965,7 @@ intel_post_enable_primary(struct drm_crtc *crtc) * FIXME: Need to fix the logic to work when we turn off all planes * but leave the pipe running. 
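 *
 * Throughout this patch the platform checks and vblank waits are keyed
 * off struct drm_i915_private rather than struct drm_device. A minimal
 * sketch of the resulting pattern, given a struct intel_crtc *crtc and
 * its pipe (hypothetical caller, illustration only):
 *
 *	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 *
 *	if (IS_GEN2(dev_priv))
 *		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
 *
 *	intel_wait_for_vblank(dev_priv, pipe);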
*/ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); /* Underruns don't always raise interrupts, so check manually. */ @@ -4594,7 +4988,7 @@ intel_pre_disable_primary(struct drm_crtc *crtc) * FIXME: Need to fix the logic to work when we turn off all planes * but leave the pipe running. */ - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); /* @@ -4626,10 +5020,10 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) * event which is after the vblank start event, so we need to have a * wait-for-vblank between disabling the plane and the pipe. */ - if (HAS_GMCH_DISPLAY(dev)) { + if (HAS_GMCH_DISPLAY(dev_priv)) { intel_set_memory_cxsr(dev_priv, false); dev_priv->wm.vlv.cxsr = false; - intel_wait_for_vblank(dev, pipe); + intel_wait_for_vblank(dev_priv, pipe); } } @@ -4639,17 +5033,16 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) struct drm_atomic_state *old_state = old_crtc_state->base.state; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); - struct drm_device *dev = crtc->base.dev; struct drm_plane *primary = crtc->base.primary; struct drm_plane_state *old_pri_state = drm_atomic_get_existing_plane_state(old_state, primary); - intel_frontbuffer_flip(dev, pipe_config->fb_bits); + intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); crtc->wm.cxsr_allowed = true; if (pipe_config->update_wm_post && pipe_config->base.active) - intel_update_watermarks(&crtc->base); + intel_update_watermarks(crtc); if (old_pri_state) { struct intel_plane_state *primary_state = @@ -4659,9 +5052,9 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) intel_fbc_post_update(crtc); - if (primary_state->visible && + if (primary_state->base.visible && (needs_modeset(&pipe_config->base) || - !old_primary_state->visible)) + !old_primary_state->base.visible)) intel_post_enable_primary(&crtc->base); } } @@ -4678,6 +5071,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) struct drm_plane_state *old_pri_state = drm_atomic_get_existing_plane_state(old_state, primary); bool modeset = needs_modeset(&pipe_config->base); + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); if (old_pri_state) { struct intel_plane_state *primary_state = @@ -4687,12 +5082,12 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) intel_fbc_pre_update(crtc, pipe_config, primary_state); - if (old_primary_state->visible && - (modeset || !primary_state->visible)) + if (old_primary_state->base.visible && + (modeset || !primary_state->base.visible)) intel_pre_disable_primary(&crtc->base); } - if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) { + if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) { crtc->wm.cxsr_allowed = false; /* @@ -4707,7 +5102,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) if (old_crtc_state->base.active) { intel_set_memory_cxsr(dev_priv, false); dev_priv->wm.vlv.cxsr = false; - intel_wait_for_vblank(dev, crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); } } @@ -4720,7 +5115,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) */ if (pipe_config->disable_lp_wm) { ilk_disable_lp_wm(dev); - intel_wait_for_vblank(dev, crtc->pipe); + intel_wait_for_vblank(dev_priv, crtc->pipe); } /* @@ -4745,9 +5140,10 @@ static void intel_pre_plane_update(struct 
intel_crtc_state *old_crtc_state) * us to. */ if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(pipe_config); + dev_priv->display.initial_watermarks(old_intel_state, + pipe_config); else if (pipe_config->update_wm_pre) - intel_update_watermarks(&crtc->base); + intel_update_watermarks(crtc); } static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) @@ -4767,18 +5163,142 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask * to compute the mask of flip planes precisely. For the time being * consider this a flip to a NULL plane. */ - intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); + intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe)); +} + +static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct drm_atomic_state *old_state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector *conn; + int i; + + for_each_connector_in_state(old_state, conn, old_conn_state, i) { + struct drm_connector_state *conn_state = conn->state; + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + + if (conn_state->crtc != crtc) + continue; + + if (encoder->pre_pll_enable) + encoder->pre_pll_enable(encoder, crtc_state, conn_state); + } +} + +static void intel_encoders_pre_enable(struct drm_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct drm_atomic_state *old_state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector *conn; + int i; + + for_each_connector_in_state(old_state, conn, old_conn_state, i) { + struct drm_connector_state *conn_state = conn->state; + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + + if (conn_state->crtc != crtc) + continue; + + if (encoder->pre_enable) + encoder->pre_enable(encoder, crtc_state, conn_state); + } +} + +static void intel_encoders_enable(struct drm_crtc *crtc, + struct intel_crtc_state *crtc_state, + struct drm_atomic_state *old_state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector *conn; + int i; + + for_each_connector_in_state(old_state, conn, old_conn_state, i) { + struct drm_connector_state *conn_state = conn->state; + struct intel_encoder *encoder = + to_intel_encoder(conn_state->best_encoder); + + if (conn_state->crtc != crtc) + continue; + + encoder->enable(encoder, crtc_state, conn_state); + intel_opregion_notify_encoder(encoder, true); + } } -static void ironlake_crtc_enable(struct drm_crtc *crtc) +static void intel_encoders_disable(struct drm_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct drm_atomic_state *old_state) { + struct drm_connector_state *old_conn_state; + struct drm_connector *conn; + int i; + + for_each_connector_in_state(old_state, conn, old_conn_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(old_conn_state->best_encoder); + + if (old_conn_state->crtc != crtc) + continue; + + intel_opregion_notify_encoder(encoder, false); + encoder->disable(encoder, old_crtc_state, old_conn_state); + } +} + +static void intel_encoders_post_disable(struct drm_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct drm_atomic_state *old_state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector *conn; + int i; + + for_each_connector_in_state(old_state, conn, old_conn_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(old_conn_state->best_encoder); + + if (old_conn_state->crtc != crtc) + 
continue; + + if (encoder->post_disable) + encoder->post_disable(encoder, old_crtc_state, old_conn_state); + } +} + +static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct drm_atomic_state *old_state) +{ + struct drm_connector_state *old_conn_state; + struct drm_connector *conn; + int i; + + for_each_connector_in_state(old_state, conn, old_conn_state, i) { + struct intel_encoder *encoder = + to_intel_encoder(old_conn_state->best_encoder); + + if (old_conn_state->crtc != crtc) + continue; + + if (encoder->post_pll_disable) + encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); + } +} + +static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, + struct drm_atomic_state *old_state) +{ + struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc->state); + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); if (WARN_ON(intel_crtc->active)) return; @@ -4816,9 +5336,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_enable) - encoder->pre_enable(encoder); + intel_encoders_pre_enable(crtc, pipe_config, old_state); if (intel_crtc->config->has_pch_encoder) { /* Note: FDI PLL enabling _must_ be done before we enable the @@ -4839,7 +5357,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_color_load_luts(&pipe_config->base); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(intel_crtc->config); + dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); intel_enable_pipe(intel_crtc); if (intel_crtc->config->has_pch_encoder) @@ -4848,15 +5366,14 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); + intel_encoders_enable(crtc, pipe_config, old_state); - if (HAS_PCH_CPT(dev)) + if (HAS_PCH_CPT(dev_priv)) cpt_verify_modeset(dev, intel_crtc->pipe); /* Must wait for vblank to avoid spurious PCH FIFO underruns */ if (intel_crtc->config->has_pch_encoder) - intel_wait_for_vblank(dev, pipe); + intel_wait_for_vblank(dev_priv, pipe); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } @@ -4864,19 +5381,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) /* IPS only exists on ULT machines and is tied to pipe A. 
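 *
 * With the intel_encoders_*() helpers above, the individual encoder
 * hooks receive the atomic states directly instead of fishing them out
 * of the legacy crtc/connector pointers. A hypothetical implementation
 * (sketch only, the name is illustrative) now has this shape:
 *
 *	static void foo_pre_enable(struct intel_encoder *encoder,
 *				   struct intel_crtc_state *pipe_config,
 *				   struct drm_connector_state *conn_state)
 *	{
 *		... program the port from pipe_config and conn_state ...
 *	}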
*/ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) { - return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; + return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; } -static void haswell_crtc_enable(struct drm_crtc *crtc) +static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, + struct drm_atomic_state *old_state) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_crtc *crtc = pipe_config->base.crtc; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc->state); + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_state); if (WARN_ON(intel_crtc->active)) return; @@ -4885,9 +5402,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, false); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); + intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); if (intel_crtc->config->shared_dpll) intel_enable_shared_dpll(intel_crtc); @@ -4925,10 +5440,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) else intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - for_each_encoder_on_crtc(dev, crtc, encoder) { - if (encoder->pre_enable) - encoder->pre_enable(encoder); - } + intel_encoders_pre_enable(crtc, pipe_config, old_state); if (intel_crtc->config->has_pch_encoder) dev_priv->display.fdi_link_train(crtc); @@ -4936,7 +5448,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_pipe_clock(intel_crtc); - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) skylake_pfit_enable(intel_crtc); else ironlake_pfit_enable(intel_crtc); @@ -4952,9 +5464,10 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_ddi_enable_transcoder_func(crtc); if (dev_priv->display.initial_watermarks != NULL) - dev_priv->display.initial_watermarks(pipe_config); + dev_priv->display.initial_watermarks(old_intel_state, + pipe_config); else - intel_update_watermarks(crtc); + intel_update_watermarks(intel_crtc); /* XXX: Do the pipe assertions at the right place for BXT DSI. 
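 *
 * The ->initial_watermarks() hook gains the atomic state as its first
 * argument in this patch. A minimal sketch of the call-site pattern used
 * by the crtc enable paths (illustration only):
 *
 *	struct intel_atomic_state *old_intel_state =
 *		to_intel_atomic_state(old_state);
 *
 *	if (dev_priv->display.initial_watermarks)
 *		dev_priv->display.initial_watermarks(old_intel_state,
 *						     pipe_config);
 *	else
 *		intel_update_watermarks(intel_crtc);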
*/ if (!transcoder_is_dsi(cpu_transcoder)) @@ -4963,20 +5476,17 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) lpt_pch_enable(crtc); - if (intel_crtc->config->dp_encoder_is_mst) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(crtc, true); assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) { - encoder->enable(encoder); - intel_opregion_notify_encoder(encoder, true); - } + intel_encoders_enable(crtc, pipe_config, old_state); if (intel_crtc->config->has_pch_encoder) { - intel_wait_for_vblank(dev, pipe); - intel_wait_for_vblank(dev, pipe); + intel_wait_for_vblank(dev_priv, pipe); + intel_wait_for_vblank(dev_priv, pipe); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, true); @@ -4985,9 +5495,9 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) /* If we change the relative order between pipe/planes enabling, we need * to change the workaround. */ hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; - if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) { - intel_wait_for_vblank(dev, hsw_workaround_pipe); - intel_wait_for_vblank(dev, hsw_workaround_pipe); + if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { + intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); + intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); } } @@ -5006,12 +5516,13 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) } } -static void ironlake_crtc_disable(struct drm_crtc *crtc) +static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, + struct drm_atomic_state *old_state) { + struct drm_crtc *crtc = old_crtc_state->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe; /* @@ -5024,8 +5535,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); } - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->disable(encoder); + intel_encoders_disable(crtc, old_crtc_state, old_state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -5037,14 +5547,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) if (intel_crtc->config->has_pch_encoder) ironlake_fdi_disable(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); + intel_encoders_post_disable(crtc, old_crtc_state, old_state); if (intel_crtc->config->has_pch_encoder) { ironlake_disable_pch_transcoder(dev_priv, pipe); - if (HAS_PCH_CPT(dev)) { + if (HAS_PCH_CPT(dev_priv)) { i915_reg_t reg; u32 temp; @@ -5069,22 +5577,19 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } -static void haswell_crtc_disable(struct drm_crtc *crtc) +static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, + struct drm_atomic_state *old_state) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_crtc *crtc = old_crtc_state->base.crtc; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; if 
(intel_crtc->config->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, false); - for_each_encoder_on_crtc(dev, crtc, encoder) { - intel_opregion_notify_encoder(encoder, false); - encoder->disable(encoder); - } + intel_encoders_disable(crtc, old_crtc_state, old_state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -5093,13 +5598,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) if (!transcoder_is_dsi(cpu_transcoder)) intel_disable_pipe(intel_crtc); - if (intel_crtc->config->dp_encoder_is_mst) + if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(crtc, false); if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) skylake_scaler_disable(intel_crtc); else ironlake_pfit_disable(intel_crtc, false); @@ -5107,18 +5612,11 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_pipe_clock(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); - - if (intel_crtc->config->has_pch_encoder) { - lpt_disable_pch_transcoder(dev_priv); - lpt_disable_iclkip(dev_priv); - intel_ddi_fdi_disable(crtc); + intel_encoders_post_disable(crtc, old_crtc_state, old_state); + if (old_crtc_state->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, true); - } } static void i9xx_pfit_enable(struct intel_crtc *crtc) @@ -5187,13 +5685,13 @@ static enum intel_display_power_domain port_to_aux_power_domain(enum port port) enum intel_display_power_domain intel_display_port_power_domain(struct intel_encoder *intel_encoder) { - struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); struct intel_digital_port *intel_dig_port; switch (intel_encoder->type) { case INTEL_OUTPUT_UNKNOWN: /* Only DDI platforms should ever use this output type */ - WARN_ON_ONCE(!HAS_DDI(dev)); + WARN_ON_ONCE(!HAS_DDI(dev_priv)); case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: case INTEL_OUTPUT_EDP: @@ -5214,7 +5712,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder) enum intel_display_power_domain intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) { - struct drm_device *dev = intel_encoder->base.dev; + struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev); struct intel_digital_port *intel_dig_port; switch (intel_encoder->type) { @@ -5227,7 +5725,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) * what's the status of the given connectors, play safe and * run the DP detection too. 
*/ - WARN_ON_ONCE(!HAS_DDI(dev)); + WARN_ON_ONCE(!HAS_DDI(dev_priv)); case INTEL_OUTPUT_DP: case INTEL_OUTPUT_EDP: intel_dig_port = enc_to_dig_port(&intel_encoder->base); @@ -5319,11 +5817,9 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) static int skl_calc_cdclk(int max_pixclk, int vco); -static void intel_update_max_cdclk(struct drm_device *dev) +static void intel_update_max_cdclk(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; int max_cdclk, vco; @@ -5345,9 +5841,9 @@ static void intel_update_max_cdclk(struct drm_device *dev) max_cdclk = 308571; dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco); - } else if (IS_BROXTON(dev)) { + } else if (IS_BROXTON(dev_priv)) { dev_priv->max_cdclk_freq = 624000; - } else if (IS_BROADWELL(dev)) { + } else if (IS_BROADWELL(dev_priv)) { /* * FIXME with extra cooling we can allow * 540 MHz for ULX and 675 Mhz for ULT. @@ -5356,15 +5852,15 @@ static void intel_update_max_cdclk(struct drm_device *dev) */ if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) dev_priv->max_cdclk_freq = 450000; - else if (IS_BDW_ULX(dev)) + else if (IS_BDW_ULX(dev_priv)) dev_priv->max_cdclk_freq = 450000; - else if (IS_BDW_ULT(dev)) + else if (IS_BDW_ULT(dev_priv)) dev_priv->max_cdclk_freq = 540000; else dev_priv->max_cdclk_freq = 675000; - } else if (IS_CHERRYVIEW(dev)) { + } else if (IS_CHERRYVIEW(dev_priv)) { dev_priv->max_cdclk_freq = 320000; - } else if (IS_VALLEYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv)) { dev_priv->max_cdclk_freq = 400000; } else { /* otherwise assume cdclk is fixed */ @@ -5380,11 +5876,9 @@ static void intel_update_max_cdclk(struct drm_device *dev) dev_priv->max_dotclk_freq); } -static void intel_update_cdclk(struct drm_device *dev) +static void intel_update_cdclk(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); + dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv); if (INTEL_GEN(dev_priv) >= 9) DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n", @@ -5545,14 +6039,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk) return; } - intel_update_cdclk(&dev_priv->drm); + intel_update_cdclk(dev_priv); } static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; - intel_update_cdclk(&dev_priv->drm); + intel_update_cdclk(dev_priv); if (dev_priv->cdclk_pll.vco == 0 || dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) @@ -5685,7 +6179,7 @@ void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco) dev_priv->skl_preferred_vco_freq = vco; if (changed) - intel_update_max_cdclk(&dev_priv->drm); + intel_update_max_cdclk(dev_priv); } static void @@ -5750,36 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv) dev_priv->cdclk_pll.vco = 0; } -static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) -{ - int ret; - u32 val; - - /* inform PCU we want to change CDCLK */ - val = SKL_CDCLK_PREPARE_FOR_CHANGE; - mutex_lock(&dev_priv->rps.hw_lock); - ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); - mutex_unlock(&dev_priv->rps.hw_lock); - - return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); -} - -static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private 
*dev_priv) -{ - return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0; -} - static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) { - struct drm_device *dev = &dev_priv->drm; u32 freq_select, pcu_ack; + int ret; WARN_ON((cdclk == 24000) != (vco == 0)); DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco); - if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { - DRM_ERROR("failed to inform PCU about cdclk change\n"); + mutex_lock(&dev_priv->rps.hw_lock); + ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); + mutex_unlock(&dev_priv->rps.hw_lock); + if (ret) { + DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", + ret); return; } @@ -5822,7 +6304,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco) sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); mutex_unlock(&dev_priv->rps.hw_lock); - intel_update_cdclk(dev); + intel_update_cdclk(dev_priv); } static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv); @@ -5869,7 +6351,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) goto sanitize; - intel_update_cdclk(&dev_priv->drm); + intel_update_cdclk(dev_priv); /* Is PLL enabled and locked ? */ if (dev_priv->cdclk_pll.vco == 0 || dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref) @@ -5903,7 +6385,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) struct drm_i915_private *dev_priv = to_i915(dev); u32 val, cmd; - WARN_ON(dev_priv->display.get_display_clock_speed(dev) + WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv) != dev_priv->cdclk_freq); if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ @@ -5960,7 +6442,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) mutex_unlock(&dev_priv->sb_lock); - intel_update_cdclk(dev); + intel_update_cdclk(dev_priv); } static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) @@ -5968,7 +6450,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) struct drm_i915_private *dev_priv = to_i915(dev); u32 val, cmd; - WARN_ON(dev_priv->display.get_display_clock_speed(dev) + WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv) != dev_priv->cdclk_freq); switch (cdclk) { @@ -6001,7 +6483,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) } mutex_unlock(&dev_priv->rps.hw_lock); - intel_update_cdclk(dev); + intel_update_cdclk(dev_priv); } static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, @@ -6164,7 +6646,7 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) */ intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) cherryview_set_cdclk(dev, req_cdclk); else valleyview_set_cdclk(dev, req_cdclk); @@ -6174,14 +6656,13 @@ static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } -static void valleyview_crtc_enable(struct drm_crtc *crtc) +static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, + struct drm_atomic_state *old_state) { + struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; - struct intel_crtc_state 
*pipe_config = - to_intel_crtc_state(crtc->state); int pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) @@ -6193,7 +6674,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_set_pipe_timings(intel_crtc); intel_set_pipe_src_size(intel_crtc); - if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { struct drm_i915_private *dev_priv = to_i915(dev); I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); @@ -6206,11 +6687,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_pll_enable) - encoder->pre_pll_enable(encoder); + intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { chv_prepare_pll(intel_crtc, intel_crtc->config); chv_enable_pll(intel_crtc, intel_crtc->config); } else { @@ -6218,22 +6697,19 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) vlv_enable_pll(intel_crtc, intel_crtc->config); } - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_enable) - encoder->pre_enable(encoder); + intel_encoders_pre_enable(crtc, pipe_config, old_state); i9xx_pfit_enable(intel_crtc); intel_color_load_luts(&pipe_config->base); - intel_update_watermarks(crtc); + intel_update_watermarks(intel_crtc); intel_enable_pipe(intel_crtc); assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); + intel_encoders_enable(crtc, pipe_config, old_state); } static void i9xx_set_pll_dividers(struct intel_crtc *crtc) @@ -6245,14 +6721,13 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc) I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); } -static void i9xx_crtc_enable(struct drm_crtc *crtc) +static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, + struct drm_atomic_state *old_state) { + struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc->state); enum pipe pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) @@ -6270,12 +6745,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; - if (!IS_GEN2(dev)) + if (!IS_GEN2(dev_priv)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->pre_enable) - encoder->pre_enable(encoder); + intel_encoders_pre_enable(crtc, pipe_config, old_state); i9xx_enable_pll(intel_crtc); @@ -6283,14 +6756,13 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_color_load_luts(&pipe_config->base); - intel_update_watermarks(crtc); + intel_update_watermarks(intel_crtc); intel_enable_pipe(intel_crtc); assert_vblank_disabled(crtc); drm_crtc_vblank_on(crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->enable(encoder); + intel_encoders_enable(crtc, pipe_config, old_state); } static void i9xx_pfit_disable(struct intel_crtc *crtc) @@ -6308,23 +6780,23 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc) I915_WRITE(PFIT_CONTROL, 0); } -static void i9xx_crtc_disable(struct drm_crtc *crtc) +static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, + struct drm_atomic_state *old_state) { + struct drm_crtc *crtc = old_crtc_state->base.crtc; struct 
drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_encoder *encoder; int pipe = intel_crtc->pipe; /* * On gen2 planes are double buffered but the pipe isn't, so we must * wait for planes to fully turn off before disabling the pipe. */ - if (IS_GEN2(dev)) - intel_wait_for_vblank(dev, pipe); + if (IS_GEN2(dev_priv)) + intel_wait_for_vblank(dev_priv, pipe); - for_each_encoder_on_crtc(dev, crtc, encoder) - encoder->disable(encoder); + intel_encoders_disable(crtc, old_crtc_state, old_state); drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); @@ -6333,24 +6805,20 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) i9xx_pfit_disable(intel_crtc); - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_disable) - encoder->post_disable(encoder); + intel_encoders_post_disable(crtc, old_crtc_state, old_state); if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) chv_disable_pll(dev_priv, pipe); - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev_priv)) vlv_disable_pll(dev_priv, pipe); else i9xx_disable_pll(intel_crtc); } - for_each_encoder_on_crtc(dev, crtc, encoder) - if (encoder->post_pll_disable) - encoder->post_pll_disable(encoder); + intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); - if (!IS_GEN2(dev)) + if (!IS_GEN2(dev_priv)) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); } @@ -6361,20 +6829,34 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum intel_display_power_domain domain; unsigned long domains; + struct drm_atomic_state *state; + struct intel_crtc_state *crtc_state; + int ret; if (!intel_crtc->active) return; - if (to_intel_plane_state(crtc->primary->state)->visible) { + if (to_intel_plane_state(crtc->primary->state)->base.visible) { WARN_ON(intel_crtc->flip_work); intel_pre_disable_primary_noatomic(crtc); intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); - to_intel_plane_state(crtc->primary->state)->visible = false; + to_intel_plane_state(crtc->primary->state)->base.visible = false; } - dev_priv->display.crtc_disable(crtc); + state = drm_atomic_state_alloc(crtc->dev); + state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; + + /* Everything's already locked, -EDEADLK can't happen. 
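 *
 * Because ->crtc_disable() now takes the old crtc state plus the atomic
 * state holding the connector states, intel_crtc_disable_noatomic() has
 * to fabricate a transient state. Condensed sketch of that sequence,
 * gathered here for readability (note drm_atomic_state_put(), not
 * _free(), in this patch):
 *
 *	state = drm_atomic_state_alloc(crtc->dev);
 *	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 *	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
 *	ret = drm_atomic_add_affected_connectors(state, crtc);
 *	dev_priv->display.crtc_disable(crtc_state, state);
 *	drm_atomic_state_put(state);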
*/ + crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); + ret = drm_atomic_add_affected_connectors(state, crtc); + + WARN_ON(IS_ERR(crtc_state) || ret); + + dev_priv->display.crtc_disable(crtc_state, state); + + drm_atomic_state_put(state); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", crtc->base.id, crtc->name); @@ -6390,7 +6872,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) encoder->base.crtc = NULL; intel_fbc_disable(intel_crtc); - intel_update_watermarks(crtc); + intel_update_watermarks(intel_crtc); intel_disable_shared_dpll(intel_crtc); domains = intel_crtc->enabled_power_domains; @@ -6516,6 +6998,7 @@ static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, struct intel_crtc_state *pipe_config) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state = pipe_config->base.state; struct intel_crtc *other_crtc; struct intel_crtc_state *other_crtc_state; @@ -6528,7 +7011,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return -EINVAL; } - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { if (pipe_config->fdi_lanes > 2) { DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", pipe_config->fdi_lanes); @@ -6538,7 +7021,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } } - if (INTEL_INFO(dev)->num_pipes == 2) + if (INTEL_INFO(dev_priv)->num_pipes == 2) return 0; /* Ivybridge 3 pipe is really complicated */ @@ -6549,7 +7032,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, if (pipe_config->fdi_lanes <= 2) return 0; - other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C)); + other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -6568,7 +7051,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, return -EINVAL; } - other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B)); + other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); other_crtc_state = intel_atomic_get_crtc_state(state, other_crtc); if (IS_ERR(other_crtc_state)) @@ -6679,7 +7162,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; int clock_limit = dev_priv->max_dotclk_freq; - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { clock_limit = dev_priv->max_cdclk_freq * 9 / 10; /* @@ -6713,11 +7196,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, /* Cantiga+ cannot handle modes with a hsync front porch of 0. * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
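 *
 * For reference, a worked example of the gen < 4 dot clock limit applied
 * a few lines up (the 400 MHz figure is the Valleyview cdclk quoted
 * elsewhere in this patch, used purely for illustration):
 *
 *	clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
 *	            = 400000 kHz * 9 / 10
 *	            = 360000 kHz maximum pixel clock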
*/ - if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && + if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) return -EINVAL; - if (HAS_IPS(dev)) + if (HAS_IPS(dev_priv)) hsw_compute_ips_config(crtc, pipe_config); if (pipe_config->has_pch_encoder) @@ -6726,10 +7209,9 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, return 0; } -static int skylake_get_display_clock_speed(struct drm_device *dev) +static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - uint32_t cdctl; + u32 cdctl; skl_dpll0_update(dev_priv); @@ -6788,9 +7270,8 @@ static void bxt_de_pll_update(struct drm_i915_private *dev_priv) dev_priv->cdclk_pll.ref; } -static int broxton_get_display_clock_speed(struct drm_device *dev) +static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 divider; int div, vco; @@ -6823,9 +7304,8 @@ static int broxton_get_display_clock_speed(struct drm_device *dev) return DIV_ROUND_CLOSEST(vco, div); } -static int broadwell_get_display_clock_speed(struct drm_device *dev) +static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint32_t lcpll = I915_READ(LCPLL_CTL); uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; @@ -6843,9 +7323,8 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev) return 675000; } -static int haswell_get_display_clock_speed(struct drm_device *dev) +static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); uint32_t lcpll = I915_READ(LCPLL_CTL); uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; @@ -6855,43 +7334,44 @@ static int haswell_get_display_clock_speed(struct drm_device *dev) return 450000; else if (freq == LCPLL_CLK_FREQ_450) return 450000; - else if (IS_HSW_ULT(dev)) + else if (IS_HSW_ULT(dev_priv)) return 337500; else return 540000; } -static int valleyview_get_display_clock_speed(struct drm_device *dev) +static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv) { - return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk", + return vlv_get_cck_clock_hpll(dev_priv, "cdclk", CCK_DISPLAY_CLOCK_CONTROL); } -static int ilk_get_display_clock_speed(struct drm_device *dev) +static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv) { return 450000; } -static int i945_get_display_clock_speed(struct drm_device *dev) +static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv) { return 400000; } -static int i915_get_display_clock_speed(struct drm_device *dev) +static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv) { return 333333; } -static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) +static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv) { return 200000; } -static int pnv_get_display_clock_speed(struct drm_device *dev) +static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv) { + struct pci_dev *pdev = dev_priv->drm.pdev; u16 gcfgc = 0; - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); + pci_read_config_word(pdev, GCFGC, &gcfgc); switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { case GC_DISPLAY_CLOCK_267_MHZ_PNV: @@ -6911,11 +7391,12 @@ static int pnv_get_display_clock_speed(struct drm_device *dev) } } -static int i915gm_get_display_clock_speed(struct drm_device *dev) +static 
int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv) { + struct pci_dev *pdev = dev_priv->drm.pdev; u16 gcfgc = 0; - pci_read_config_word(dev->pdev, GCFGC, &gcfgc); + pci_read_config_word(pdev, GCFGC, &gcfgc); if (gcfgc & GC_LOW_FREQUENCY_ENABLE) return 133333; @@ -6930,13 +7411,14 @@ static int i915gm_get_display_clock_speed(struct drm_device *dev) } } -static int i865_get_display_clock_speed(struct drm_device *dev) +static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv) { return 266667; } -static int i85x_get_display_clock_speed(struct drm_device *dev) +static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv) { + struct pci_dev *pdev = dev_priv->drm.pdev; u16 hpllcc = 0; /* @@ -6944,10 +7426,10 @@ static int i85x_get_display_clock_speed(struct drm_device *dev) * encoding is different :( * FIXME is this the right way to detect 852GM/852GMV? */ - if (dev->pdev->revision == 0x1) + if (pdev->revision == 0x1) return 133333; - pci_bus_read_config_word(dev->pdev->bus, + pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 3), HPLLCC, &hpllcc); /* Assume that the hardware is in the high speed state. This @@ -6972,14 +7454,13 @@ static int i85x_get_display_clock_speed(struct drm_device *dev) return 0; } -static int i830_get_display_clock_speed(struct drm_device *dev) +static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv) { return 133333; } -static unsigned int intel_hpll_vco(struct drm_device *dev) +static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); static const unsigned int blb_vco[8] = { [0] = 3200000, [1] = 4000000, @@ -7022,20 +7503,20 @@ static unsigned int intel_hpll_vco(struct drm_device *dev) uint8_t tmp = 0; /* FIXME other chipsets? */ - if (IS_GM45(dev)) + if (IS_GM45(dev_priv)) vco_table = ctg_vco; - else if (IS_G4X(dev)) + else if (IS_G4X(dev_priv)) vco_table = elk_vco; - else if (IS_CRESTLINE(dev)) + else if (IS_CRESTLINE(dev_priv)) vco_table = cl_vco; - else if (IS_PINEVIEW(dev)) + else if (IS_PINEVIEW(dev_priv)) vco_table = pnv_vco; - else if (IS_G33(dev)) + else if (IS_G33(dev_priv)) vco_table = blb_vco; else return 0; - tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO); + tmp = I915_READ(IS_MOBILE(dev_priv) ? 
HPLLVCO_MOBILE : HPLLVCO); vco = vco_table[tmp & 0x7]; if (vco == 0) @@ -7046,12 +7527,13 @@ static unsigned int intel_hpll_vco(struct drm_device *dev) return vco; } -static int gm45_get_display_clock_speed(struct drm_device *dev) +static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv) { - unsigned int cdclk_sel, vco = intel_hpll_vco(dev); + struct pci_dev *pdev = dev_priv->drm.pdev; + unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv); uint16_t tmp = 0; - pci_read_config_word(dev->pdev, GCFGC, &tmp); + pci_read_config_word(pdev, GCFGC, &tmp); cdclk_sel = (tmp >> 12) & 0x1; @@ -7068,16 +7550,17 @@ static int gm45_get_display_clock_speed(struct drm_device *dev) } } -static int i965gm_get_display_clock_speed(struct drm_device *dev) +static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv) { + struct pci_dev *pdev = dev_priv->drm.pdev; static const uint8_t div_3200[] = { 16, 10, 8 }; static const uint8_t div_4000[] = { 20, 12, 10 }; static const uint8_t div_5333[] = { 24, 16, 14 }; const uint8_t *div_table; - unsigned int cdclk_sel, vco = intel_hpll_vco(dev); + unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv); uint16_t tmp = 0; - pci_read_config_word(dev->pdev, GCFGC, &tmp); + pci_read_config_word(pdev, GCFGC, &tmp); cdclk_sel = ((tmp >> 8) & 0x1f) - 1; @@ -7105,17 +7588,18 @@ fail: return 200000; } -static int g33_get_display_clock_speed(struct drm_device *dev) +static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv) { + struct pci_dev *pdev = dev_priv->drm.pdev; static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; const uint8_t *div_table; - unsigned int cdclk_sel, vco = intel_hpll_vco(dev); + unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv); uint16_t tmp = 0; - pci_read_config_word(dev->pdev, GCFGC, &tmp); + pci_read_config_word(pdev, GCFGC, &tmp); cdclk_sel = (tmp >> 4) & 0x7; @@ -7201,10 +7685,10 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct dpll *reduced_clock) { - struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 fp, fp2 = 0; - if (IS_PINEVIEW(dev)) { + if (IS_PINEVIEW(dev_priv)) { fp = pnv_dpll_compute_fp(&crtc_state->dpll); if (reduced_clock) fp2 = pnv_dpll_compute_fp(reduced_clock); @@ -7272,12 +7756,11 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n, struct intel_link_m_n *m2_n2) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); int pipe = crtc->pipe; enum transcoder transcoder = crtc->config->cpu_transcoder; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); @@ -7286,8 +7769,8 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, * for gen < 8) and if DRRS is supported (to make sure the * registers are not unnecessarily accessed). 
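 *
 * vlv_force_pll_on() and vlv_force_pll_off(), converted to take
 * dev_priv just below, keep their usage pattern; a hypothetical caller
 * (sketch only) that needs the PLL running without enabling the pipe:
 *
 *	if (vlv_force_pll_on(dev_priv, pipe, &dpll) == 0) {
 *		... touch the PHY/DPIO registers as needed ...
 *		vlv_force_pll_off(dev_priv, pipe);
 *	}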
*/ - if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) && - crtc->config->has_drrs) { + if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || + INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { I915_WRITE(PIPE_DATA_M2(transcoder), TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); @@ -7574,11 +8057,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc, * in cases where we need the PLL enabled even when @pipe is not going to * be enabled. */ -int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, +int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe, const struct dpll *dpll) { - struct intel_crtc *crtc = - to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); struct intel_crtc_state *pipe_config; pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); @@ -7589,7 +8071,7 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, pipe_config->pixel_multiplier = 1; pipe_config->dpll = *dpll; - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { chv_compute_dpll(crtc, pipe_config); chv_prepare_pll(crtc, pipe_config); chv_enable_pll(crtc, pipe_config); @@ -7612,20 +8094,19 @@ int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, * Disable the PLL for @pipe. To be used in cases where we need * the PLL enabled even when @pipe is not going to be enabled. */ -void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe) +void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe) { - if (IS_CHERRYVIEW(dev)) - chv_disable_pll(to_i915(dev), pipe); + if (IS_CHERRYVIEW(dev_priv)) + chv_disable_pll(dev_priv, pipe); else - vlv_disable_pll(to_i915(dev), pipe); + vlv_disable_pll(dev_priv, pipe); } static void i9xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, struct dpll *reduced_clock) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 dpll; struct dpll *clock = &crtc_state->dpll; @@ -7638,7 +8119,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, else dpll |= DPLLB_MODE_DAC_SERIAL; - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { + if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || IS_G33(dev_priv)) { dpll |= (crtc_state->pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; } @@ -7651,11 +8132,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, dpll |= DPLL_SDVO_HIGH_SPEED; /* compute bitmask from p1 value */ - if (IS_PINEVIEW(dev)) + if (IS_PINEVIEW(dev_priv)) dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; else { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; - if (IS_G4X(dev) && reduced_clock) + if (IS_G4X(dev_priv) && reduced_clock) dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; } switch (clock->p2) { @@ -7672,7 +8153,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } - if (INTEL_INFO(dev)->gen >= 4) + if (INTEL_GEN(dev_priv) >= 4) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); if (crtc_state->sdvo_tv_clock) @@ -7686,7 +8167,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, dpll |= DPLL_VCO_ENABLE; crtc_state->dpll_hw_state.dpll = dpll; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { u32 dpll_md = (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; crtc_state->dpll_hw_state.dpll_md = dpll_md; @@ -7717,7 +8198,8 
@@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) + if (!IS_I830(dev_priv) && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && @@ -7732,8 +8214,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; @@ -7759,7 +8240,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) vsyncshift += adjusted_mode->crtc_htotal; } - if (INTEL_INFO(dev)->gen > 3) + if (INTEL_GEN(dev_priv) > 3) I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); I915_WRITE(HTOTAL(cpu_transcoder), @@ -7786,7 +8267,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is * documented on the DDI_FUNC_CTL register description, EDP Input Select * bits. */ - if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && + if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && (pipe == PIPE_B || pipe == PIPE_C)) I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); @@ -7882,8 +8363,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) { - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); uint32_t pipeconf; pipeconf = 0; @@ -7896,7 +8376,8 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) pipeconf |= PIPECONF_DOUBLE_WIDE; /* only g4x and later have fancy bpc/dither controls */ - if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) { /* Bspec claims that we can't use dithering for 30bpp pipes. 
*/ if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) pipeconf |= PIPECONF_DITHER_EN | @@ -7918,7 +8399,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } } - if (HAS_PIPE_CXSR(dev)) { + if (HAS_PIPE_CXSR(dev_priv)) { if (intel_crtc->lowfreq_avail) { DRM_DEBUG_KMS("enabling CxSR downclocking\n"); pipeconf |= PIPECONF_CXSR_DOWNCLOCK; @@ -7928,7 +8409,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { - if (INTEL_INFO(dev)->gen < 4 || + if (INTEL_GEN(dev_priv) < 4 || intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else @@ -7936,7 +8417,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) } else pipeconf |= PIPECONF_PROGRESSIVE; - if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && intel_crtc->config->limited_color_range) pipeconf |= PIPECONF_COLOR_RANGE_SELECT; @@ -8136,11 +8617,11 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc, static void i9xx_get_pfit_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); uint32_t tmp; - if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) + if (INTEL_GEN(dev_priv) <= 3 && + (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) return; tmp = I915_READ(PFIT_CONTROL); @@ -8148,7 +8629,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc, return; /* Check whether the pfit is attached to our pipe. */ - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { if (crtc->pipe != PIPE_B) return; } else { @@ -8212,10 +8693,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; - fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + fb->modifier = I915_FORMAT_MOD_X_TILED; } } @@ -8224,7 +8705,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb->pixel_format = fourcc; fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { if (plane_config->tiling) offset = I915_READ(DSPTILEOFF(plane)); else @@ -8244,7 +8725,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, - fb->modifier[0]); + fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; @@ -8293,8 +8774,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, static bool i9xx_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; uint32_t tmp; bool ret; @@ -8312,7 +8792,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (!(tmp & PIPECONF_ENABLE)) goto out; - if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv)) { switch (tmp & PIPECONF_BPC_MASK) { case PIPECONF_6BPC: pipe_config->pipe_bpp = 18; @@ -8328,11 +8809,11 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, } } - if 
((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && + if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && (tmp & PIPECONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; intel_get_pipe_timings(crtc, pipe_config); @@ -8340,9 +8821,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pfit_config(crtc, pipe_config); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { /* No way to read it out on pipes B and C */ - if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A) + if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) tmp = dev_priv->chv_dpll_md[crtc->pipe]; else tmp = I915_READ(DPLL_MD(crtc->pipe)); @@ -8350,7 +8831,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; pipe_config->dpll_hw_state.dpll_md = tmp; - } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { + } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || + IS_G33(dev_priv)) { tmp = I915_READ(DPLL(crtc->pipe)); pipe_config->pixel_multiplier = ((tmp & SDVO_MULTIPLIER_MASK) @@ -8362,13 +8844,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); - if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { /* * DPLL_DVO_2X_MODE must be enabled for both DPLLs * on 830. Filter it out here so that we don't * report errors due to that. */ - if (IS_I830(dev)) + if (IS_I830(dev_priv)) pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); @@ -8380,9 +8862,9 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, DPLL_PORTB_READY_MASK); } - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) chv_crtc_clock_get(crtc, pipe_config); - else if (IS_VALLEYVIEW(dev)) + else if (IS_VALLEYVIEW(dev_priv)) vlv_crtc_clock_get(crtc, pipe_config); else i9xx_crtc_clock_get(crtc, pipe_config); @@ -8433,7 +8915,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev) } } - if (HAS_PCH_IBX(dev)) { + if (HAS_PCH_IBX(dev_priv)) { has_ck505 = dev_priv->vbt.display_clock_mode; can_ssc = has_ck505; } else { @@ -8681,7 +9163,8 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) with_spread = true; - if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n")) + if (WARN(HAS_PCH_LPT_LP(dev_priv) && + with_fdi, "LP PCH doesn't have FDI\n")) with_fdi = false; mutex_lock(&dev_priv->sb_lock); @@ -8704,7 +9187,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, } } - reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0; + reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); @@ -8720,7 +9203,7 @@ static void lpt_disable_clkout_dp(struct drm_device *dev) mutex_lock(&dev_priv->sb_lock); - reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0; + reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); @@ -8828,9 +9311,11 @@ static void lpt_init_pch_refclk(struct drm_device *dev) */ void intel_init_pch_refclk(struct drm_device *dev) { - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + struct drm_i915_private *dev_priv = to_i915(dev); + + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) ironlake_init_pch_refclk(dev); - else if (HAS_PCH_LPT(dev)) + else if (HAS_PCH_LPT(dev_priv)) lpt_init_pch_refclk(dev); } @@ -8959,7 +9444,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { if ((intel_panel_use_ssc(dev_priv) && dev_priv->vbt.lvds_ssc_freq == 100000) || - (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) + (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev))) factor = 25; } else if (crtc_state->sdvo_tv_clock) factor = 20; @@ -8995,6 +9480,24 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, if (intel_crtc_has_dp_encoder(crtc_state)) dpll |= DPLL_SDVO_HIGH_SPEED; + /* + * The high speed IO clock is only really required for + * SDVO/HDMI/DP, but we also enable it for CRT to make it + * possible to share the DPLL between CRT and HDMI. Enabling + * the clock needlessly does no real harm, except use up a + * bit of power potentially. + * + * We'll limit this to IVB with 3 pipes, since it has only two + * DPLLs and so DPLL sharing is the only way to get three pipes + * driving PCH ports at the same time. On SNB we could do this, + * and potentially avoid enabling the second DPLL, but it's not + * clear if it''s a win or loss power wise. No point in doing + * this on ILK at all since it has a fixed DPLL<->pipe mapping. + */ + if (INTEL_INFO(dev_priv)->num_pipes == 3 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) + dpll |= DPLL_SDVO_HIGH_SPEED; + /* compute bitmask from p1 value */ dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; /* also FPA1 */ @@ -9115,11 +9618,10 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n, struct intel_link_m_n *m2_n2) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (INTEL_INFO(dev)->gen >= 5) { + if (INTEL_GEN(dev_priv) >= 5) { m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) @@ -9131,7 +9633,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, * gen < 8) and if DRRS is supported (to make sure the * registers are not unnecessarily read). 
*/ - if (m2_n2 && INTEL_INFO(dev)->gen < 8 && + if (m2_n2 && INTEL_GEN(dev_priv) < 8 && crtc->config->has_drrs) { m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); @@ -9235,17 +9737,17 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, tiling = val & PLANE_CTL_TILED_MASK; switch (tiling) { case PLANE_CTL_TILED_LINEAR: - fb->modifier[0] = DRM_FORMAT_MOD_NONE; + fb->modifier = DRM_FORMAT_MOD_NONE; break; case PLANE_CTL_TILED_X: plane_config->tiling = I915_TILING_X; - fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + fb->modifier = I915_FORMAT_MOD_X_TILED; break; case PLANE_CTL_TILED_Y: - fb->modifier[0] = I915_FORMAT_MOD_Y_TILED; + fb->modifier = I915_FORMAT_MOD_Y_TILED; break; case PLANE_CTL_TILED_YF: - fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED; + fb->modifier = I915_FORMAT_MOD_Yf_TILED; break; default: MISSING_CASE(tiling); @@ -9262,13 +9764,13 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb->width = ((val >> 0) & 0x1fff) + 1; val = I915_READ(PLANE_STRIDE(pipe, 0)); - stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0], + stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier, fb->pixel_format); fb->pitches[0] = (val & 0x3ff) * stride_mult; aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, - fb->modifier[0]); + fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; @@ -9281,7 +9783,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, return; error: - kfree(fb); + kfree(intel_fb); } static void ironlake_get_pfit_config(struct intel_crtc *crtc, @@ -9301,7 +9803,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, /* We currently do not free assignements of panel fitters on * ivb/hsw (since we don't use the higher upscaling modes which * differentiates them) so just WARN about this case for now. 
*/ - if (IS_GEN7(dev)) { + if (IS_GEN7(dev_priv)) { WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != PF_PIPE_SEL_IVB(crtc->pipe)); } @@ -9333,10 +9835,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; - fb->modifier[0] = I915_FORMAT_MOD_X_TILED; + fb->modifier = I915_FORMAT_MOD_X_TILED; } } @@ -9346,7 +9848,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; base = I915_READ(DSPSURF(pipe)) & 0xfffff000; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { offset = I915_READ(DSPOFFSET(pipe)); } else { if (plane_config->tiling) @@ -9365,7 +9867,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, aligned_height = intel_fb_align_height(dev, fb->height, fb->pixel_format, - fb->modifier[0]); + fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; @@ -9487,10 +9989,10 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); - I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); + I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n"); I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, "CPU PWM1 enabled\n"); - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, "CPU PWM2 enabled\n"); I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, @@ -9510,9 +10012,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) { - struct drm_device *dev = &dev_priv->drm; - - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) return I915_READ(D_COMP_HSW); else return I915_READ(D_COMP_BDW); @@ -9520,13 +10020,11 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) { - struct drm_device *dev = &dev_priv->drm; - - if (IS_HASWELL(dev)) { + if (IS_HASWELL(dev_priv)) { mutex_lock(&dev_priv->rps.hw_lock); if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) - DRM_ERROR("Failed to write to D_COMP\n"); + DRM_DEBUG_KMS("Failed to write to D_COMP\n"); mutex_unlock(&dev_priv->rps.hw_lock); } else { I915_WRITE(D_COMP_BDW, val); @@ -9637,7 +10135,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) } intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_update_cdclk(&dev_priv->drm); + intel_update_cdclk(dev_priv); } /* @@ -9670,7 +10168,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv) DRM_DEBUG_KMS("Enabling package C8+\n"); - if (HAS_PCH_LPT_LP(dev)) { + if (HAS_PCH_LPT_LP(dev_priv)) { val = I915_READ(SOUTH_DSPCLK_GATE_D); val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; I915_WRITE(SOUTH_DSPCLK_GATE_D, val); @@ -9690,7 +10188,7 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv) hsw_restore_lcpll(dev_priv); lpt_init_pch_refclk(dev); - if (HAS_PCH_LPT_LP(dev)) { + if (HAS_PCH_LPT_LP(dev_priv)) { val = I915_READ(SOUTH_DSPCLK_GATE_D); val |= PCH_LP_PARTITION_LEVEL_DISABLE; I915_WRITE(SOUTH_DSPCLK_GATE_D, val); @@ -9707,6 +10205,29 @@ static 
void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state) bxt_set_cdclk(to_i915(dev), req_cdclk); } +static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state, + int pixel_rate) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) + pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); + + /* BSpec says "Do not use DisplayPort with CDCLK less than + * 432 MHz, audio enabled, port width x4, and link rate + * HBR2 (5.4 GHz), or else there may be audio corruption or + * screen corruption." + */ + if (intel_crtc_has_dp_encoder(crtc_state) && + crtc_state->has_audio && + crtc_state->port_clock >= 540000 && + crtc_state->lane_count == 4) + pixel_rate = max(432000, pixel_rate); + + return pixel_rate; +} + /* compute the max rate for new configuration */ static int ilk_max_pixel_rate(struct drm_atomic_state *state) { @@ -9732,9 +10253,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state) pixel_rate = ilk_pipe_pixel_rate(crtc_state); - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) - pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95); + if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv)) + pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state, + pixel_rate); intel_state->min_pixclk[i] = pixel_rate; } @@ -9817,7 +10338,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); - intel_update_cdclk(dev); + intel_update_cdclk(dev_priv); WARN(cdclk != dev_priv->cdclk_freq, "cdclk requested %d kHz but got %d kHz\n", @@ -9934,15 +10455,12 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, switch (port) { case PORT_A: - pipe_config->ddi_pll_sel = SKL_DPLL0; id = DPLL_ID_SKL_DPLL0; break; case PORT_B: - pipe_config->ddi_pll_sel = SKL_DPLL1; id = DPLL_ID_SKL_DPLL1; break; case PORT_C: - pipe_config->ddi_pll_sel = SKL_DPLL2; id = DPLL_ID_SKL_DPLL2; break; default: @@ -9961,25 +10479,10 @@ static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, u32 temp; temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); - pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); + id = temp >> (port * 3 + 1); - switch (pipe_config->ddi_pll_sel) { - case SKL_DPLL0: - id = DPLL_ID_SKL_DPLL0; - break; - case SKL_DPLL1: - id = DPLL_ID_SKL_DPLL1; - break; - case SKL_DPLL2: - id = DPLL_ID_SKL_DPLL2; - break; - case SKL_DPLL3: - id = DPLL_ID_SKL_DPLL3; - break; - default: - MISSING_CASE(pipe_config->ddi_pll_sel); + if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) return; - } pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } @@ -9989,10 +10492,9 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, struct intel_crtc_state *pipe_config) { enum intel_dpll_id id; + uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); - pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); - - switch (pipe_config->ddi_pll_sel) { + switch (ddi_pll_sel) { case PORT_CLK_SEL_WRPLL1: id = DPLL_ID_WRPLL1; break; @@ -10012,7 +10514,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, id = DPLL_ID_LCPLL_2700; break; default: - MISSING_CASE(pipe_config->ddi_pll_sel); + MISSING_CASE(ddi_pll_sel); /* fall through */ case PORT_CLK_SEL_NONE: return; @@ -10123,8 +10625,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, static void 
haswell_get_ddi_port_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll; enum port port; uint32_t tmp; @@ -10133,9 +10634,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) skylake_get_ddi_pll(dev_priv, port, pipe_config); - else if (IS_BROXTON(dev)) + else if (IS_BROXTON(dev_priv)) bxt_get_ddi_pll(dev_priv, port, pipe_config); else haswell_get_ddi_pll(dev_priv, port, pipe_config); @@ -10151,7 +10652,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, * DDI E. So just check whether this pipe is wired to DDI E and whether * the PCH transcoder is on. */ - if (INTEL_INFO(dev)->gen < 9 && + if (INTEL_GEN(dev_priv) < 9 && (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { pipe_config->has_pch_encoder = true; @@ -10166,8 +10667,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum intel_display_power_domain power_domain; unsigned long power_domain_mask; bool active; @@ -10200,11 +10700,9 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; - if (INTEL_INFO(dev)->gen >= 9) { - skl_init_scalers(dev, crtc, pipe_config); - } + if (INTEL_GEN(dev_priv) >= 9) { + skl_init_scalers(dev_priv, crtc, pipe_config); - if (INTEL_INFO(dev)->gen >= 9) { pipe_config->scaler_state.scaler_id = -1; pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); } @@ -10212,13 +10710,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { power_domain_mask |= BIT(power_domain); - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) skylake_get_pfit_config(crtc, pipe_config); else ironlake_get_pfit_config(crtc, pipe_config); } - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && (I915_READ(IPS_CTL) & IPS_ENABLE); @@ -10245,7 +10743,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t cntl = 0, size = 0; - if (plane_state && plane_state->visible) { + if (plane_state && plane_state->base.visible) { unsigned int width = plane_state->base.crtc_w; unsigned int height = plane_state->base.crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; @@ -10309,7 +10807,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, int pipe = intel_crtc->pipe; uint32_t cntl = 0; - if (plane_state && plane_state->visible) { + if (plane_state && plane_state->base.visible) { cntl = MCURSOR_GAMMA_ENABLE; switch (plane_state->base.crtc_w) { case 64: @@ -10327,10 +10825,10 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, } cntl |= pipe << 28; /* Connect to correct pipe */ - if (HAS_DDI(dev)) + if (HAS_DDI(dev_priv)) cntl |= CURSOR_PIPE_CSC_ENABLE; - if 
(plane_state->base.rotation == BIT(DRM_ROTATE_180)) + if (plane_state->base.rotation & DRM_ROTATE_180) cntl |= CURSOR_ROTATE_180; } @@ -10375,8 +10873,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, pos |= y << CURSOR_Y_SHIFT; /* ILK+ do this automagically */ - if (HAS_GMCH_DISPLAY(dev) && - plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + if (HAS_GMCH_DISPLAY(dev_priv) && + plane_state->base.rotation & DRM_ROTATE_180) { base += (plane_state->base.crtc_h * plane_state->base.crtc_w - 1) * 4; } @@ -10384,13 +10882,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, I915_WRITE(CURPOS(pipe), pos); - if (IS_845G(dev) || IS_I865G(dev)) + if (IS_845G(dev_priv) || IS_I865G(dev_priv)) i845_update_cursor(crtc, base, plane_state); else i9xx_update_cursor(crtc, base, plane_state); } -static bool cursor_size_ok(struct drm_device *dev, +static bool cursor_size_ok(struct drm_i915_private *dev_priv, uint32_t width, uint32_t height) { if (width == 0 || height == 0) @@ -10402,11 +10900,11 @@ static bool cursor_size_ok(struct drm_device *dev, * the precision of the register. Everything else requires * square cursors, limited to a few power-of-two sizes. */ - if (IS_845G(dev) || IS_I865G(dev)) { + if (IS_845G(dev_priv) || IS_I865G(dev_priv)) { if ((width & 63) != 0) return false; - if (width > (IS_845G(dev) ? 64 : 512)) + if (width > (IS_845G(dev_priv) ? 64 : 512)) return false; if (height > 1023) @@ -10415,7 +10913,7 @@ static bool cursor_size_ok(struct drm_device *dev, switch (width | height) { case 256: case 128: - if (IS_GEN2(dev)) + if (IS_GEN2(dev_priv)) return false; case 64: break; @@ -10509,7 +11007,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev, fb = intel_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put(obj); return fb; } @@ -10594,6 +11092,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = NULL; struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *fb; struct drm_mode_config *config = &dev->mode_config; struct drm_atomic_state *state = NULL, *restore_state = NULL; @@ -10746,13 +11245,18 @@ found: old->restore_state = restore_state; /* let the connector get through one full cycle before testing */ - intel_wait_for_vblank(dev, intel_crtc->pipe); + intel_wait_for_vblank(dev_priv, intel_crtc->pipe); return true; fail: - drm_atomic_state_free(state); - drm_atomic_state_free(restore_state); - restore_state = state = NULL; + if (state) { + drm_atomic_state_put(state); + state = NULL; + } + if (restore_state) { + drm_atomic_state_put(restore_state); + restore_state = NULL; + } if (ret == -EDEADLK) { drm_modeset_backoff(ctx); @@ -10780,10 +11284,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, return; ret = drm_atomic_commit(state); - if (ret) { + if (ret) DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); - drm_atomic_state_free(state); - } + drm_atomic_state_put(state); } static int i9xx_pll_refclk(struct drm_device *dev, @@ -10794,9 +11297,9 @@ static int i9xx_pll_refclk(struct drm_device *dev, if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) return dev_priv->vbt.lvds_ssc_freq; - else if (HAS_PCH_SPLIT(dev)) + else if (HAS_PCH_SPLIT(dev_priv)) return 120000; - else if (!IS_GEN2(dev)) + else if (!IS_GEN2(dev_priv)) return 96000; else return 48000; @@ -10821,7 
+11324,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, fp = pipe_config->dpll_hw_state.fp1; clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; - if (IS_PINEVIEW(dev)) { + if (IS_PINEVIEW(dev_priv)) { clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; } else { @@ -10829,8 +11332,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; } - if (!IS_GEN2(dev)) { - if (IS_PINEVIEW(dev)) + if (!IS_GEN2(dev_priv)) { + if (IS_PINEVIEW(dev_priv)) clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); else @@ -10852,12 +11355,12 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, return; } - if (IS_PINEVIEW(dev)) + if (IS_PINEVIEW(dev_priv)) port_clock = pnv_calc_dpll_params(refclk, &clock); else port_clock = i9xx_calc_dpll_params(refclk, &clock); } else { - u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS); + u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); if (is_lvds) { @@ -11020,13 +11523,13 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(work->old_fb, primary->state->rotation); - drm_gem_object_unreference(&work->pending_flip_obj->base); - - if (work->flip_queued_req) - i915_gem_request_assign(&work->flip_queued_req, NULL); + i915_gem_object_put(work->pending_flip_obj); mutex_unlock(&dev->struct_mutex); - intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit); + i915_gem_request_put(work->flip_queued_req); + + intel_frontbuffer_flip_complete(to_i915(dev), + to_intel_plane(primary)->frontbuffer_bit); intel_fbc_post_update(crtc); drm_framebuffer_unreference(work->old_fb); @@ -11047,10 +11550,8 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - unsigned reset_counter; - reset_counter = i915_reset_counter(&dev_priv->gpu_error); - if (crtc->reset_counter != reset_counter) + if (abort_flip_on_reset(crtc)) return true; /* @@ -11060,7 +11561,7 @@ static bool __pageflip_finished_cs(struct intel_crtc *crtc, * really needed there. But since ctg has the registers, * include it in the check anyway. */ - if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev)) + if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) return true; /* @@ -11123,8 +11624,7 @@ static bool pageflip_finished(struct intel_crtc *crtc, void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) { struct drm_device *dev = &dev_priv->drm; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); struct intel_flip_work *work; unsigned long flags; @@ -11137,12 +11637,12 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) * lost pageflips) so needs the full irqsave spinlocks. 
*/ spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->flip_work; + work = crtc->flip_work; if (work != NULL && !is_mmio_work(work) && - pageflip_finished(intel_crtc, work)) - page_flip_completed(intel_crtc); + pageflip_finished(crtc, work)) + page_flip_completed(crtc); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -11150,8 +11650,7 @@ void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) { struct drm_device *dev = &dev_priv->drm; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); struct intel_flip_work *work; unsigned long flags; @@ -11164,12 +11663,12 @@ void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) * lost pageflips) so needs the full irqsave spinlocks. */ spin_lock_irqsave(&dev->event_lock, flags); - work = intel_crtc->flip_work; + work = crtc->flip_work; if (work != NULL && is_mmio_work(work) && - pageflip_finished(intel_crtc, work)) - page_flip_completed(intel_crtc); + pageflip_finished(crtc, work)) + page_flip_completed(crtc); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -11191,7 +11690,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11207,13 +11706,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, 0); /* aux display base address, unused */ + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, 0); /* aux display base address, unused */ return 0; } @@ -11225,7 +11724,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11238,13 +11737,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(engine, MI_NOOP); - intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | + intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, MI_NOOP); return 0; } @@ -11256,7 +11755,7 @@ static int intel_gen4_queue_flip(struct 
drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11270,11 +11769,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev, * Display Registers (which do not change across a page-flip) * so we need only reprogram the base address. */ - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0]); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | - obj->tiling_mode); + intel_ring_emit(ring, fb->pitches[0]); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset | + intel_fb_modifier_to_tiling(fb->modifier)); /* XXX Enabling the panel-fitter across page-flip is so far * untested on non-native modes, so ignore it for now. @@ -11282,7 +11781,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(engine, pf | pipesrc); + intel_ring_emit(ring, pf | pipesrc); return 0; } @@ -11294,7 +11793,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct intel_ring *ring = req->ring; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11304,10 +11803,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev, if (ret) return ret; - intel_ring_emit(engine, MI_DISPLAY_FLIP | + intel_ring_emit(ring, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, fb->pitches[0] | + intel_fb_modifier_to_tiling(fb->modifier)); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11317,7 +11817,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(engine, pf | pipesrc); + intel_ring_emit(ring, pf | pipesrc); return 0; } @@ -11329,7 +11829,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *engine = req->engine; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_ring *ring = req->ring; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t plane_bit = 0; int len, ret; @@ -11350,14 +11851,14 @@ static int intel_gen7_queue_flip(struct drm_device *dev, } len = 4; - if (engine->id == RCS) { + if (req->engine->id == RCS) { len += 6; /* * On Gen 8, SRM is now taking an extra dword to accommodate * 48bits addresses, and we need a NOOP for the batch size to * stay even. */ - if (IS_GEN8(dev)) + if (IS_GEN8(dev_priv)) len += 2; } @@ -11388,30 +11889,32 @@ static int intel_gen7_queue_flip(struct drm_device *dev, * for the RCS also doesn't appear to drop events. Setting the DERRMR * to zero does lead to lockups within MI_DISPLAY_FLIP. 
*/ - if (engine->id == RCS) { - intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(engine, DERRMR); - intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE | + if (req->engine->id == RCS) { + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(ring, DERRMR); + intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEC_PRI_FLIP_DONE)); - if (IS_GEN8(dev)) - intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 | + if (IS_GEN8(dev_priv)) + intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT); else - intel_ring_emit(engine, MI_STORE_REGISTER_MEM | + intel_ring_emit(ring, MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit_reg(engine, DERRMR); - intel_ring_emit(engine, engine->scratch.gtt_offset + 256); - if (IS_GEN8(dev)) { - intel_ring_emit(engine, 0); - intel_ring_emit(engine, MI_NOOP); + intel_ring_emit_reg(ring, DERRMR); + intel_ring_emit(ring, + i915_ggtt_offset(req->engine->scratch) + 256); + if (IS_GEN8(dev_priv)) { + intel_ring_emit(ring, 0); + intel_ring_emit(ring, MI_NOOP); } } - intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); - intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); - intel_ring_emit(engine, (MI_NOOP)); + intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); + intel_ring_emit(ring, fb->pitches[0] | + intel_fb_modifier_to_tiling(fb->modifier)); + intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset); + intel_ring_emit(ring, (MI_NOOP)); return 0; } @@ -11419,8 +11922,6 @@ static int intel_gen7_queue_flip(struct drm_device *dev, static bool use_mmio_flip(struct intel_engine_cs *engine, struct drm_i915_gem_object *obj) { - struct reservation_object *resv; - /* * This is not being used for older platforms, because * non-availability of flip done interrupt forces us to use @@ -11442,11 +11943,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine, else if (i915.enable_execlists) return true; - resv = i915_gem_object_get_dmabuf_resv(obj); - if (resv && !reservation_object_test_signaled_rcu(resv, false)) - return true; - - return engine != i915_gem_request_get_engine(obj->last_write_req); + return engine != i915_gem_object_last_write_engine(obj); } static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, @@ -11457,11 +11954,11 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_framebuffer *fb = intel_crtc->base.primary->fb; const enum pipe pipe = intel_crtc->pipe; - u32 ctl, stride, tile_height; + u32 ctl, stride = skl_plane_stride(fb, 0, rotation); ctl = I915_READ(PLANE_CTL(pipe, 0)); ctl &= ~PLANE_CTL_TILED_MASK; - switch (fb->modifier[0]) { + switch (fb->modifier) { case DRM_FORMAT_MOD_NONE: break; case I915_FORMAT_MOD_X_TILED: @@ -11474,21 +11971,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, ctl |= PLANE_CTL_TILED_YF; break; default: - MISSING_CASE(fb->modifier[0]); - } - - /* - * The stride is either expressed as a multiple of 64 bytes chunks for - * linear buffers or in number of tiles for tiled buffers. 
- */ - if (intel_rotation_90_or_270(rotation)) { - /* stride = Surface height in tiles */ - tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0); - stride = DIV_ROUND_UP(fb->height, tile_height); - } else { - stride = fb->pitches[0] / - intel_fb_stride_alignment(dev_priv, fb->modifier[0], - fb->pixel_format); + MISSING_CASE(fb->modifier); } /* @@ -11507,15 +11990,13 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_framebuffer *intel_fb = - to_intel_framebuffer(intel_crtc->base.primary->fb); - struct drm_i915_gem_object *obj = intel_fb->obj; + struct drm_framebuffer *fb = intel_crtc->base.primary->fb; i915_reg_t reg = DSPCNTR(intel_crtc->plane); u32 dspcntr; dspcntr = I915_READ(reg); - if (obj->tiling_mode != I915_TILING_NONE) + if (fb->modifier == I915_FORMAT_MOD_X_TILED) dspcntr |= DISPPLANE_TILED; else dspcntr &= ~DISPPLANE_TILED; @@ -11535,18 +12016,8 @@ static void intel_mmio_flip_work_func(struct work_struct *w) struct intel_framebuffer *intel_fb = to_intel_framebuffer(crtc->base.primary->fb); struct drm_i915_gem_object *obj = intel_fb->obj; - struct reservation_object *resv; - - if (work->flip_queued_req) - WARN_ON(__i915_wait_request(work->flip_queued_req, - false, NULL, - &dev_priv->rps.mmioflips)); - /* For framebuffer backed by dmabuf, wait for fence */ - resv = i915_gem_object_get_dmabuf_resv(obj); - if (resv) - WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false, - MAX_SCHEDULE_TIMEOUT) < 0); + WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0); intel_pipe_update_start(crtc); @@ -11609,8 +12080,7 @@ static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) { struct drm_device *dev = &dev_priv->drm; - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); struct intel_flip_work *work; WARN_ON(!in_interrupt()); @@ -11619,19 +12089,19 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) return; spin_lock(&dev->event_lock); - work = intel_crtc->flip_work; + work = crtc->flip_work; if (work != NULL && !is_mmio_work(work) && - __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) { + __pageflip_stall_check_cs(dev_priv, crtc, work)) { WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", - work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc)); - page_flip_completed(intel_crtc); + work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc)); + page_flip_completed(crtc); work = NULL; } if (work != NULL && !is_mmio_work(work) && - intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1) + intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1) intel_queue_rps_boost_for_request(work->flip_queued_req); spin_unlock(&dev->event_lock); } @@ -11651,7 +12121,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct intel_flip_work *work; struct intel_engine_cs *engine; bool mmio_flip; - struct drm_i915_gem_request *request = NULL; + struct drm_i915_gem_request *request; + struct i915_vma *vma; int ret; /* @@ -11670,7 +12141,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, * TILEOFF/LINOFF registers can't be changed via MI display flips. * Note that pitch changes could also affect these register. 
*/ - if (INTEL_INFO(dev)->gen > 3 && + if (INTEL_GEN(dev_priv) > 3 && (fb->offsets[0] != crtc->primary->fb->offsets[0] || fb->pitches[0] != crtc->primary->fb->pitches[0])) return -EINVAL; @@ -11717,115 +12188,115 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* Reference the objects for the scheduled work. */ drm_framebuffer_reference(work->old_fb); - drm_gem_object_reference(&obj->base); crtc->primary->fb = fb; update_state_fb(crtc->primary); - intel_fbc_pre_update(intel_crtc, intel_crtc->config, - to_intel_plane_state(primary->state)); - - work->pending_flip_obj = obj; + work->pending_flip_obj = i915_gem_object_get(obj); ret = i915_mutex_lock_interruptible(dev); if (ret) goto cleanup; - intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error); - if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) { + intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error); + if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) { ret = -EIO; - goto cleanup; + goto unlock; } atomic_inc(&intel_crtc->unpin_work_count); - if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - engine = &dev_priv->engine[BCS]; - if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + engine = dev_priv->engine[BCS]; + if (fb->modifier != old_fb->modifier) /* vlv: DISPLAY_FLIP fails to change tiling */ engine = NULL; - } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - engine = &dev_priv->engine[BCS]; - } else if (INTEL_INFO(dev)->gen >= 7) { - engine = i915_gem_request_get_engine(obj->last_write_req); + } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { + engine = dev_priv->engine[BCS]; + } else if (INTEL_GEN(dev_priv) >= 7) { + engine = i915_gem_object_last_write_engine(obj); if (engine == NULL || engine->id != RCS) - engine = &dev_priv->engine[BCS]; + engine = dev_priv->engine[BCS]; } else { - engine = &dev_priv->engine[RCS]; + engine = dev_priv->engine[RCS]; } mmio_flip = use_mmio_flip(engine, obj); - /* When using CS flips, we want to emit semaphores between rings. - * However, when using mmio flips we will create a task to do the - * synchronisation, so all we want here is to pin the framebuffer - * into the display plane and skip any waits. - */ - if (!mmio_flip) { - ret = i915_gem_object_sync(obj, engine, &request); - if (!ret && !request) { - request = i915_gem_request_alloc(engine, NULL); - ret = PTR_ERR_OR_ZERO(request); - } - - if (ret) - goto cleanup_pending; - } - - ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); - if (ret) + vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); goto cleanup_pending; + } - work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), - obj, 0); + work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation); work->gtt_offset += intel_crtc->dspaddr_offset; work->rotation = crtc->primary->state->rotation; + /* + * There's the potential that the next frame will not be compatible with + * FBC, so we want to call pre_update() before the actual page flip. + * The problem is that pre_update() caches some information about the fb + * object, so we want to do this only after the object is pinned. Let's + * be on the safe side and do this immediately before scheduling the + * flip. 
+ */ + intel_fbc_pre_update(intel_crtc, intel_crtc->config, + to_intel_plane_state(primary->state)); + if (mmio_flip) { INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); + queue_work(system_unbound_wq, &work->mmio_work); + } else { + request = i915_gem_request_alloc(engine, engine->last_context); + if (IS_ERR(request)) { + ret = PTR_ERR(request); + goto cleanup_unpin; + } - i915_gem_request_assign(&work->flip_queued_req, - obj->last_write_req); + ret = i915_gem_request_await_object(request, obj, false); + if (ret) + goto cleanup_request; - schedule_work(&work->mmio_work); - } else { - i915_gem_request_assign(&work->flip_queued_req, request); ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, page_flip_flags); if (ret) - goto cleanup_unpin; + goto cleanup_request; intel_mark_page_flip_active(intel_crtc, work); + work->flip_queued_req = i915_gem_request_get(request); i915_add_request_no_flush(request); } + i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); i915_gem_track_fb(intel_fb_obj(old_fb), obj, to_intel_plane(primary)->frontbuffer_bit); mutex_unlock(&dev->struct_mutex); - intel_frontbuffer_flip_prepare(dev, + intel_frontbuffer_flip_prepare(to_i915(dev), to_intel_plane(primary)->frontbuffer_bit); trace_i915_flip_request(intel_crtc->plane, obj); return 0; +cleanup_request: + i915_add_request_no_flush(request); cleanup_unpin: intel_unpin_fb_obj(fb, crtc->primary->state->rotation); cleanup_pending: - if (!IS_ERR_OR_NULL(request)) - i915_add_request_no_flush(request); atomic_dec(&intel_crtc->unpin_work_count); +unlock: mutex_unlock(&dev->struct_mutex); cleanup: crtc->primary->fb = old_fb; update_state_fb(crtc->primary); - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put(obj); drm_framebuffer_unreference(work->old_fb); spin_lock_irq(&dev->event_lock); @@ -11863,8 +12334,7 @@ retry: goto retry; } - if (ret) - drm_atomic_state_free(state); + drm_atomic_state_put(state); if (ret == 0 && event) { spin_lock_irq(&dev->event_lock); @@ -11893,18 +12363,18 @@ static bool intel_wm_need_update(struct drm_plane *plane, struct intel_plane_state *cur = to_intel_plane_state(plane->state); /* Update watermarks on tiling or size changes. 
*/ - if (new->visible != cur->visible) + if (new->base.visible != cur->base.visible) return true; if (!cur->base.fb || !new->base.fb) return false; - if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] || + if (cur->base.fb->modifier != new->base.fb->modifier || cur->base.rotation != new->base.rotation || - drm_rect_width(&new->src) != drm_rect_width(&cur->src) || - drm_rect_height(&new->src) != drm_rect_height(&cur->src) || - drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || - drm_rect_height(&new->dst) != drm_rect_height(&cur->dst)) + drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || + drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || + drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || + drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) return true; return false; @@ -11912,10 +12382,10 @@ static bool intel_wm_need_update(struct drm_plane *plane, static bool needs_scaling(struct intel_plane_state *state) { - int src_w = drm_rect_width(&state->src) >> 16; - int src_h = drm_rect_height(&state->src) >> 16; - int dst_w = drm_rect_width(&state->dst); - int dst_h = drm_rect_height(&state->dst); + int src_w = drm_rect_width(&state->base.src) >> 16; + int src_h = drm_rect_height(&state->base.src) >> 16; + int dst_w = drm_rect_width(&state->base.dst); + int dst_h = drm_rect_height(&state->base.dst); return (src_w != dst_w || src_h != dst_h); } @@ -11938,7 +12408,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct drm_framebuffer *fb = plane_state->fb; int ret; - if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { + if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) { ret = skl_update_scaler_plane( to_intel_crtc_state(crtc_state), to_intel_plane_state(plane_state)); @@ -11946,8 +12416,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, return ret; } - was_visible = old_plane_state->visible; - visible = to_intel_plane_state(plane_state)->visible; + was_visible = old_plane_state->base.visible; + visible = to_intel_plane_state(plane_state)->base.visible; if (!was_crtc_enabled && WARN_ON(was_visible)) was_visible = false; @@ -11963,7 +12433,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, * only combine the results from all planes in the current place? */ if (!is_crtc_enabled) - to_intel_plane_state(plane_state)->visible = visible = false; + to_intel_plane_state(plane_state)->base.visible = visible = false; if (!was_visible && !visible) return 0; @@ -12005,7 +12475,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, /* Pre-gen9 platforms need two-step watermark updates */ if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) && - INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks) + INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks) to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true; if (visible || was_visible) @@ -12017,7 +12487,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, * cstate->update_wm was already set above, so this flag will * take effect when we commit and program watermarks. 
*/ - if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) && + if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) && needs_scaling(to_intel_plane_state(plane_state)) && !needs_scaling(old_plane_state)) pipe_config->disable_lp_wm = true; @@ -12110,7 +12580,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, * old state and the new state. We can program these * immediately. */ - ret = dev_priv->display.compute_intermediate_wm(crtc->dev, + ret = dev_priv->display.compute_intermediate_wm(dev, intel_crtc, pipe_config); if (ret) { @@ -12122,7 +12592,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; } - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { if (mode_changed) ret = skl_update_scaler_crtc(pipe_config); @@ -12167,22 +12637,22 @@ static void connected_sink_compute_bpp(struct intel_connector *connector, struct intel_crtc_state *pipe_config) { + const struct drm_display_info *info = &connector->base.display_info; int bpp = pipe_config->pipe_bpp; DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", - connector->base.base.id, - connector->base.name); + connector->base.base.id, + connector->base.name); /* Don't use an invalid EDID bpc value */ - if (connector->base.display_info.bpc && - connector->base.display_info.bpc * 3 < bpp) { + if (info->bpc != 0 && info->bpc * 3 < bpp) { DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", - bpp, connector->base.display_info.bpc*3); - pipe_config->pipe_bpp = connector->base.display_info.bpc*3; + bpp, info->bpc * 3); + pipe_config->pipe_bpp = info->bpc * 3; } /* Clamp bpp to 8 on screens without EDID 1.4 */ - if (connector->base.display_info.bpc == 0 && bpp > 24) { + if (info->bpc == 0 && bpp > 24) { DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", bpp); pipe_config->pipe_bpp = 24; @@ -12193,15 +12663,16 @@ static int compute_baseline_pipe_bpp(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_atomic_state *state; struct drm_connector *connector; struct drm_connector_state *connector_state; int bpp, i; - if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) + if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_CHERRYVIEW(dev_priv))) bpp = 10*3; - else if (INTEL_INFO(dev)->gen >= 5) + else if (INTEL_GEN(dev_priv) >= 5) bpp = 12*3; else bpp = 8*3; @@ -12234,77 +12705,84 @@ static void intel_dump_crtc_timings(const struct drm_display_mode *mode) mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); } +static inline void +intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, + unsigned int lane_count, struct intel_link_m_n *m_n) +{ + DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", + id, lane_count, + m_n->gmch_m, m_n->gmch_n, + m_n->link_m, m_n->link_n, m_n->tu); +} + static void intel_dump_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, const char *context) { struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_plane *plane; struct intel_plane *intel_plane; struct intel_plane_state *state; struct drm_framebuffer *fb; - DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n", - crtc->base.base.id, crtc->base.name, - context, pipe_config, pipe_name(crtc->pipe)); + 
DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n", + crtc->base.base.id, crtc->base.name, context); - DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); - DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n", + DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", + transcoder_name(pipe_config->cpu_transcoder), pipe_config->pipe_bpp, pipe_config->dither); - DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", - pipe_config->has_pch_encoder, - pipe_config->fdi_lanes, - pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n, - pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, - pipe_config->fdi_m_n.tu); - DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", - intel_crtc_has_dp_encoder(pipe_config), - pipe_config->lane_count, - pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, - pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, - pipe_config->dp_m_n.tu); - DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", - intel_crtc_has_dp_encoder(pipe_config), - pipe_config->lane_count, - pipe_config->dp_m2_n2.gmch_m, - pipe_config->dp_m2_n2.gmch_n, - pipe_config->dp_m2_n2.link_m, - pipe_config->dp_m2_n2.link_n, - pipe_config->dp_m2_n2.tu); + if (pipe_config->has_pch_encoder) + intel_dump_m_n_config(pipe_config, "fdi", + pipe_config->fdi_lanes, + &pipe_config->fdi_m_n); + + if (intel_crtc_has_dp_encoder(pipe_config)) { + intel_dump_m_n_config(pipe_config, "dp m_n", + pipe_config->lane_count, &pipe_config->dp_m_n); + if (pipe_config->has_drrs) + intel_dump_m_n_config(pipe_config, "dp m2_n2", + pipe_config->lane_count, + &pipe_config->dp_m2_n2); + } DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", - pipe_config->has_audio, - pipe_config->has_infoframe); + pipe_config->has_audio, pipe_config->has_infoframe); DRM_DEBUG_KMS("requested mode:\n"); drm_mode_debug_printmodeline(&pipe_config->base.mode); DRM_DEBUG_KMS("adjusted mode:\n"); drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); - DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock); - DRM_DEBUG_KMS("pipe src size: %dx%d\n", + DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n", + pipe_config->port_clock, pipe_config->pipe_src_w, pipe_config->pipe_src_h); - DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", - crtc->num_scalers, - pipe_config->scaler_state.scaler_users, - pipe_config->scaler_state.scaler_id); - DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", - pipe_config->gmch_pfit.control, - pipe_config->gmch_pfit.pgm_ratios, - pipe_config->gmch_pfit.lvds_border_bits); - DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", - pipe_config->pch_pfit.pos, - pipe_config->pch_pfit.size, - pipe_config->pch_pfit.enabled ? 
"enabled" : "disabled"); - DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); - DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide); - if (IS_BROXTON(dev)) { - DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", + crtc->num_scalers, + pipe_config->scaler_state.scaler_users, + pipe_config->scaler_state.scaler_id); + + if (HAS_GMCH_DISPLAY(dev_priv)) + DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", + pipe_config->gmch_pfit.control, + pipe_config->gmch_pfit.pgm_ratios, + pipe_config->gmch_pfit.lvds_border_bits); + else + DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", + pipe_config->pch_pfit.pos, + pipe_config->pch_pfit.size, + enableddisabled(pipe_config->pch_pfit.enabled)); + + DRM_DEBUG_KMS("ips: %i, double wide: %i\n", + pipe_config->ips_enabled, pipe_config->double_wide); + + if (IS_BROXTON(dev_priv)) { + DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x," "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, " "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n", - pipe_config->ddi_pll_sel, pipe_config->dpll_hw_state.ebb0, pipe_config->dpll_hw_state.ebb4, pipe_config->dpll_hw_state.pll0, @@ -12316,16 +12794,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->dpll_hw_state.pll9, pipe_config->dpll_hw_state.pll10, pipe_config->dpll_hw_state.pcsdw12); - } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { - DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: " + } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + DRM_DEBUG_KMS("dpll_hw_state: " "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n", - pipe_config->ddi_pll_sel, pipe_config->dpll_hw_state.ctrl1, pipe_config->dpll_hw_state.cfgcr1, pipe_config->dpll_hw_state.cfgcr2); - } else if (HAS_DDI(dev)) { - DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", - pipe_config->ddi_pll_sel, + } else if (HAS_DDI(dev_priv)) { + DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", pipe_config->dpll_hw_state.wrpll, pipe_config->dpll_hw_state.spll); } else { @@ -12339,6 +12815,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("planes on this crtc\n"); list_for_each_entry(plane, &dev->mode_config.plane_list, head) { + struct drm_format_name_buf format_name; intel_plane = to_intel_plane(plane); if (intel_plane->pipe != crtc->pipe) continue; @@ -12351,19 +12828,20 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, continue; } - DRM_DEBUG_KMS("[PLANE:%d:%s] enabled", - plane->base.id, plane->name); - DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s", + DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n", + plane->base.id, plane->name, fb->base.id, fb->width, fb->height, - drm_get_format_name(fb->pixel_format)); - DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", - state->scaler_id, - state->src.x1 >> 16, state->src.y1 >> 16, - drm_rect_width(&state->src) >> 16, - drm_rect_height(&state->src) >> 16, - state->dst.x1, state->dst.y1, - drm_rect_width(&state->dst), - drm_rect_height(&state->dst)); + drm_get_format_name(fb->pixel_format, &format_name)); + if (INTEL_GEN(dev_priv) >= 9) + DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", + state->scaler_id, + state->base.src.x1 >> 16, + state->base.src.y1 >> 16, + drm_rect_width(&state->base.src) >> 16, + drm_rect_height(&state->base.src) >> 16, + state->base.dst.x1, state->base.dst.y1, + 
drm_rect_width(&state->base.dst), + drm_rect_height(&state->base.dst)); } } @@ -12372,6 +12850,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct drm_connector *connector; unsigned int used_ports = 0; + unsigned int used_mst_ports = 0; /* * Walk the connector list instead of the encoder @@ -12396,7 +12875,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) switch (encoder->type) { unsigned int port_mask; case INTEL_OUTPUT_UNKNOWN: - if (WARN_ON(!HAS_DDI(dev))) + if (WARN_ON(!HAS_DDI(to_i915(dev)))) break; case INTEL_OUTPUT_DP: case INTEL_OUTPUT_HDMI: @@ -12408,11 +12887,20 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state) return false; used_ports |= port_mask; + break; + case INTEL_OUTPUT_DP_MST: + used_mst_ports |= + 1 << enc_to_mst(&encoder->base)->primary->port; + break; default: break; } } + /* can't mix MST and SST/HDMI on the same port */ + if (used_ports & used_mst_ports) + return false; + return true; } @@ -12423,7 +12911,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) struct intel_crtc_scaler_state scaler_state; struct intel_dpll_hw_state dpll_hw_state; struct intel_shared_dpll *shared_dpll; - uint32_t ddi_pll_sel; bool force_thru; /* FIXME: before the switch to atomic started, a new pipe_config was @@ -12435,7 +12922,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) scaler_state = crtc_state->scaler_state; shared_dpll = crtc_state->shared_dpll; dpll_hw_state = crtc_state->dpll_hw_state; - ddi_pll_sel = crtc_state->ddi_pll_sel; force_thru = crtc_state->pch_pfit.force_thru; memset(crtc_state, 0, sizeof *crtc_state); @@ -12444,7 +12930,6 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) crtc_state->scaler_state = scaler_state; crtc_state->shared_dpll = shared_dpll; crtc_state->dpll_hw_state = dpll_hw_state; - crtc_state->ddi_pll_sel = ddi_pll_sel; crtc_state->pch_pfit.force_thru = force_thru; } @@ -12532,7 +13017,7 @@ encoder_retry: encoder = to_intel_encoder(connector_state->best_encoder); - if (!(encoder->compute_config(encoder, pipe_config))) { + if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { DRM_DEBUG_KMS("Encoder config failure\n"); goto fail; } @@ -12620,12 +13105,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2) return false; } -#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ - list_for_each_entry((intel_crtc), \ - &(dev)->mode_config.crtc_list, \ - base.head) \ - for_each_if (mask & (1 <<(intel_crtc)->pipe)) - static bool intel_compare_m_n(unsigned int m, unsigned int n, unsigned int m2, unsigned int n2, @@ -12677,7 +13156,7 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, } static bool -intel_pipe_config_compare(struct drm_device *dev, +intel_pipe_config_compare(struct drm_i915_private *dev_priv, struct intel_crtc_state *current_config, struct intel_crtc_state *pipe_config, bool adjust) @@ -12801,7 +13280,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); - if (INTEL_INFO(dev)->gen < 8) { + if (INTEL_GEN(dev_priv) < 8) { PIPE_CONF_CHECK_M_N(dp_m_n); if (current_config->has_drrs) @@ -12827,8 +13306,8 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_I(has_hdmi_sink); - if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || - IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + if ((INTEL_GEN(dev_priv) < 8 && 
!IS_HASWELL(dev_priv)) || + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) PIPE_CONF_CHECK_I(limited_color_range); PIPE_CONF_CHECK_I(has_infoframe); @@ -12850,7 +13329,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(gmch_pfit.control); /* pfit ratios are autocomputed by the hw on gen4+ */ - if (INTEL_INFO(dev)->gen < 4) + if (INTEL_GEN(dev_priv) < 4) PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); @@ -12868,13 +13347,11 @@ intel_pipe_config_compare(struct drm_device *dev, } /* BDW+ don't expose a synchronous way to read the state */ - if (IS_HASWELL(dev)) + if (IS_HASWELL(dev_priv)) PIPE_CONF_CHECK_I(ips_enabled); PIPE_CONF_CHECK_I(double_wide); - PIPE_CONF_CHECK_X(ddi_pll_sel); - PIPE_CONF_CHECK_P(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); @@ -12889,7 +13366,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(dsi_pll.ctrl); PIPE_CONF_CHECK_X(dsi_pll.div); - if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) + if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) PIPE_CONF_CHECK_I(pipe_bpp); PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); @@ -12927,54 +13404,130 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, static void verify_wm_state(struct drm_crtc *crtc, struct drm_crtc_state *new_state) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct skl_ddb_allocation hw_ddb, *sw_ddb; - struct skl_ddb_entry *hw_entry, *sw_entry; + struct skl_pipe_wm hw_wm, *sw_wm; + struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; + struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); const enum pipe pipe = intel_crtc->pipe; - int plane; + int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_INFO(dev)->gen < 9 || !new_state->active) + if (INTEL_GEN(dev_priv) < 9 || !new_state->active) return; + skl_pipe_wm_get_hw_state(crtc, &hw_wm); + sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; + skl_ddb_get_hw_state(dev_priv, &hw_ddb); sw_ddb = &dev_priv->wm.skl_hw.ddb; /* planes */ - for_each_plane(dev_priv, pipe, plane) { - hw_entry = &hw_ddb.plane[pipe][plane]; - sw_entry = &sw_ddb->plane[pipe][plane]; + for_each_universal_plane(dev_priv, pipe, plane) { + hw_plane_wm = &hw_wm.planes[plane]; + sw_plane_wm = &sw_wm->planes[plane]; - if (skl_ddb_entry_equal(hw_entry, sw_entry)) - continue; + /* Watermarks */ + for (level = 0; level <= max_level; level++) { + if (skl_wm_level_equals(&hw_plane_wm->wm[level], + &sw_plane_wm->wm[level])) + continue; + + DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), plane + 1, level, + sw_plane_wm->wm[level].plane_en, + sw_plane_wm->wm[level].plane_res_b, + sw_plane_wm->wm[level].plane_res_l, + hw_plane_wm->wm[level].plane_en, + hw_plane_wm->wm[level].plane_res_b, + hw_plane_wm->wm[level].plane_res_l); + } + + if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, + &sw_plane_wm->trans_wm)) { + DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), plane + 1, + sw_plane_wm->trans_wm.plane_en, + sw_plane_wm->trans_wm.plane_res_b, + sw_plane_wm->trans_wm.plane_res_l, + hw_plane_wm->trans_wm.plane_en, + hw_plane_wm->trans_wm.plane_res_b, + hw_plane_wm->trans_wm.plane_res_l); + } - DRM_ERROR("mismatch in DDB state 
pipe %c plane %d " - "(expected (%u,%u), found (%u,%u))\n", - pipe_name(pipe), plane + 1, - sw_entry->start, sw_entry->end, - hw_entry->start, hw_entry->end); + /* DDB */ + hw_ddb_entry = &hw_ddb.plane[pipe][plane]; + sw_ddb_entry = &sw_ddb->plane[pipe][plane]; + + if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { + DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", + pipe_name(pipe), plane + 1, + sw_ddb_entry->start, sw_ddb_entry->end, + hw_ddb_entry->start, hw_ddb_entry->end); + } } - /* cursor */ - hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; - sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; + /* + * cursor + * If the cursor plane isn't active, we may not have updated it's ddb + * allocation. In that case since the ddb allocation will be updated + * once the plane becomes visible, we can skip this check + */ + if (intel_crtc->cursor_addr) { + hw_plane_wm = &hw_wm.planes[PLANE_CURSOR]; + sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; - if (!skl_ddb_entry_equal(hw_entry, sw_entry)) { - DRM_ERROR("mismatch in DDB state pipe %c cursor " - "(expected (%u,%u), found (%u,%u))\n", - pipe_name(pipe), - sw_entry->start, sw_entry->end, - hw_entry->start, hw_entry->end); + /* Watermarks */ + for (level = 0; level <= max_level; level++) { + if (skl_wm_level_equals(&hw_plane_wm->wm[level], + &sw_plane_wm->wm[level])) + continue; + + DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), level, + sw_plane_wm->wm[level].plane_en, + sw_plane_wm->wm[level].plane_res_b, + sw_plane_wm->wm[level].plane_res_l, + hw_plane_wm->wm[level].plane_en, + hw_plane_wm->wm[level].plane_res_b, + hw_plane_wm->wm[level].plane_res_l); + } + + if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, + &sw_plane_wm->trans_wm)) { + DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", + pipe_name(pipe), + sw_plane_wm->trans_wm.plane_en, + sw_plane_wm->trans_wm.plane_res_b, + sw_plane_wm->trans_wm.plane_res_l, + hw_plane_wm->trans_wm.plane_en, + hw_plane_wm->trans_wm.plane_res_b, + hw_plane_wm->trans_wm.plane_res_l); + } + + /* DDB */ + hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; + sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; + + if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { + DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", + pipe_name(pipe), + sw_ddb_entry->start, sw_ddb_entry->end, + hw_ddb_entry->start, hw_ddb_entry->end); + } } } static void -verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc) +verify_connector_state(struct drm_device *dev, + struct drm_atomic_state *state, + struct drm_crtc *crtc) { struct drm_connector *connector; + struct drm_connector_state *old_conn_state; + int i; - drm_for_each_connector(connector, dev) { + for_each_connector_in_state(state, connector, old_conn_state, i) { struct drm_encoder *encoder = connector->encoder; struct drm_connector_state *state = connector->state; @@ -13089,7 +13642,7 @@ verify_crtc_state(struct drm_crtc *crtc, intel_pipe_config_sanity_check(dev_priv, pipe_config); sw_config = to_intel_crtc_state(crtc->state); - if (!intel_pipe_config_compare(dev, sw_config, + if (!intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(intel_crtc, pipe_config, @@ -13182,15 +13735,16 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, static void 
intel_modeset_verify_crtc(struct drm_crtc *crtc, - struct drm_crtc_state *old_state, - struct drm_crtc_state *new_state) + struct drm_atomic_state *state, + struct drm_crtc_state *old_state, + struct drm_crtc_state *new_state) { if (!needs_modeset(new_state) && !to_intel_crtc_state(new_state)->update_pipe) return; verify_wm_state(crtc, new_state); - verify_connector_state(crtc->dev, crtc); + verify_connector_state(crtc->dev, state, crtc); verify_crtc_state(crtc, old_state, new_state); verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); } @@ -13206,16 +13760,17 @@ verify_disabled_dpll_state(struct drm_device *dev) } static void -intel_modeset_verify_disabled(struct drm_device *dev) +intel_modeset_verify_disabled(struct drm_device *dev, + struct drm_atomic_state *state) { verify_encoder_state(dev); - verify_connector_state(dev, NULL); + verify_connector_state(dev, state, NULL); verify_disabled_dpll_state(dev); } static void update_scanline_offset(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* * The scanline counter increments at the leading edge of hsync. @@ -13235,7 +13790,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) * there's an extra 1 line difference. So we need to add two instead of * one to the value. */ - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; int vtotal; @@ -13244,7 +13799,7 @@ static void update_scanline_offset(struct intel_crtc *crtc) vtotal /= 2; crtc->scanline_offset = vtotal - 1; - } else if (HAS_DDI(dev) && + } else if (HAS_DDI(dev_priv) && intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) { crtc->scanline_offset = 2; } else @@ -13429,8 +13984,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state) DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n", intel_state->cdclk, intel_state->dev_cdclk); - } else + } else { to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq; + } intel_modeset_clear_plls(state); @@ -13507,7 +14063,7 @@ static int intel_atomic_check(struct drm_device *dev, } if (i915.fastboot && - intel_pipe_config_compare(dev, + intel_pipe_config_compare(dev_priv, to_intel_crtc_state(crtc->state), pipe_config, true)) { crtc_state->mode_changed = false; @@ -13531,8 +14087,9 @@ static int intel_atomic_check(struct drm_device *dev, if (ret) return ret; - } else - intel_state->cdclk = dev_priv->cdclk_freq; + } else { + intel_state->cdclk = dev_priv->atomic_cdclk_freq; + } ret = drm_atomic_helper_check_planes(dev, state); if (ret) @@ -13543,13 +14100,10 @@ static int intel_atomic_check(struct drm_device *dev, } static int intel_atomic_prepare_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) + struct drm_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_plane_state *plane_state; struct drm_crtc_state *crtc_state; - struct drm_plane *plane; struct drm_crtc *crtc; int i, ret; @@ -13572,27 +14126,6 @@ static int intel_atomic_prepare_commit(struct drm_device *dev, ret = drm_atomic_helper_prepare_planes(dev, state); mutex_unlock(&dev->struct_mutex); - if (!ret && !nonblock) { - for_each_plane_in_state(state, plane, plane_state, i) { - struct intel_plane_state *intel_plane_state = - to_intel_plane_state(plane_state); - - if (!intel_plane_state->wait_req) - continue; - - ret = __i915_wait_request(intel_plane_state->wait_req, - true, NULL, NULL); - if (ret) { - /* 
Any hang should be swallowed by the wait */ - WARN_ON(ret == -EIO); - mutex_lock(&dev->struct_mutex); - drm_atomic_helper_cleanup_planes(dev, state); - mutex_unlock(&dev->struct_mutex); - break; - } - } - } - return ret; } @@ -13618,22 +14151,24 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev, return; for_each_pipe(dev_priv, pipe) { - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, + pipe); if (!((1 << pipe) & crtc_mask)) continue; - ret = drm_crtc_vblank_get(crtc); + ret = drm_crtc_vblank_get(&crtc->base); if (WARN_ON(ret != 0)) { crtc_mask &= ~(1 << pipe); continue; } - last_vblank_count[pipe] = drm_crtc_vblank_count(crtc); + last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base); } for_each_pipe(dev_priv, pipe) { - struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, + pipe); long lret; if (!((1 << pipe) & crtc_mask)) @@ -13641,12 +14176,12 @@ static void intel_atomic_wait_for_vblanks(struct drm_device *dev, lret = wait_event_timeout(dev->vblank[pipe].queue, last_vblank_count[pipe] != - drm_crtc_vblank_count(crtc), + drm_crtc_vblank_count(&crtc->base), msecs_to_jiffies(50)); WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe)); - drm_crtc_vblank_put(crtc); + drm_crtc_vblank_put(&crtc->base); } } @@ -13671,6 +14206,122 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) return false; } +static void intel_update_crtc(struct drm_crtc *crtc, + struct drm_atomic_state *state, + struct drm_crtc_state *old_crtc_state, + unsigned int *crtc_vblank_mask) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state); + bool modeset = needs_modeset(crtc->state); + + if (modeset) { + update_scanline_offset(intel_crtc); + dev_priv->display.crtc_enable(pipe_config, state); + } else { + intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); + } + + if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { + intel_fbc_enable( + intel_crtc, pipe_config, + to_intel_plane_state(crtc->primary->state)); + } + + drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); + + if (needs_vblank_wait(pipe_config)) + *crtc_vblank_mask |= drm_crtc_mask(crtc); +} + +static void intel_update_crtcs(struct drm_atomic_state *state, + unsigned int *crtc_vblank_mask) +{ + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + int i; + + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + if (!crtc->state->active) + continue; + + intel_update_crtc(crtc, state, old_crtc_state, + crtc_vblank_mask); + } +} + +static void skl_update_crtcs(struct drm_atomic_state *state, + unsigned int *crtc_vblank_mask) +{ + struct drm_i915_private *dev_priv = to_i915(state->dev); + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + struct drm_crtc_state *old_crtc_state; + struct intel_crtc_state *cstate; + unsigned int updated = 0; + bool progress; + enum pipe pipe; + int i; + + const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; + + for_each_crtc_in_state(state, crtc, old_crtc_state, i) + /* ignore allocations for crtc's that have been turned off. 
*/ + if (crtc->state->active) + entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; + + /* + * Whenever the number of active pipes changes, we need to make sure we + * update the pipes in the right order so that their ddb allocations + * never overlap with eachother inbetween CRTC updates. Otherwise we'll + * cause pipe underruns and other bad stuff. + */ + do { + progress = false; + + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + bool vbl_wait = false; + unsigned int cmask = drm_crtc_mask(crtc); + + intel_crtc = to_intel_crtc(crtc); + cstate = to_intel_crtc_state(crtc->state); + pipe = intel_crtc->pipe; + + if (updated & cmask || !cstate->base.active) + continue; + + if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i)) + continue; + + updated |= cmask; + entries[i] = &cstate->wm.skl.ddb; + + /* + * If this is an already active pipe, it's DDB changed, + * and this isn't the last pipe that needs updating + * then we need to wait for a vblank to pass for the + * new ddb allocation to take effect. + */ + if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb, + &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) && + !crtc->state->active_changed && + intel_state->wm_results.dirty_pipes != updated) + vbl_wait = true; + + intel_update_crtc(crtc, state, old_crtc_state, + crtc_vblank_mask); + + if (vbl_wait) + intel_wait_for_vblank(dev_priv, pipe); + + progress = true; + } + } while (progress); +} + static void intel_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; @@ -13679,37 +14330,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct drm_crtc_state *old_crtc_state; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; - struct drm_plane *plane; - struct drm_plane_state *plane_state; bool hw_check = intel_state->modeset; unsigned long put_domains[I915_MAX_PIPES] = {}; unsigned crtc_vblank_mask = 0; - int i, ret; - - for_each_plane_in_state(state, plane, plane_state, i) { - struct intel_plane_state *intel_plane_state = - to_intel_plane_state(plane_state); - - if (!intel_plane_state->wait_req) - continue; - - ret = __i915_wait_request(intel_plane_state->wait_req, - true, NULL, NULL); - /* EIO should be eaten, and we can't get interrupted in the - * worker, and blocking commits have waited already. 
*/ - WARN_ON(ret); - } + int i; drm_atomic_helper_wait_for_dependencies(state); - if (intel_state->modeset) { - memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, - sizeof(intel_state->min_pixclk)); - dev_priv->active_crtcs = intel_state->active_crtcs; - dev_priv->atomic_cdclk_freq = intel_state->cdclk; - + if (intel_state->modeset) intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); - } for_each_crtc_in_state(state, crtc, old_crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -13730,7 +14359,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (old_crtc_state->active) { intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); - dev_priv->display.crtc_disable(crtc); + dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state); intel_crtc->active = false; intel_fbc_disable(intel_crtc); intel_disable_shared_dpll(intel_crtc); @@ -13742,8 +14371,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_check_cpu_fifo_underruns(dev_priv); intel_check_pch_fifo_underruns(dev_priv); - if (!crtc->state->active) - intel_update_watermarks(crtc); + if (!crtc->state->active) { + /* + * Make sure we don't call initial_watermarks + * for ILK-style watermark updates. + */ + if (dev_priv->display.atomic_update_watermarks) + dev_priv->display.initial_watermarks(intel_state, + to_intel_crtc_state(crtc->state)); + else + intel_update_watermarks(intel_crtc); + } } } @@ -13763,23 +14401,15 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) * SKL workaround: bspec recommends we disable the SAGV when we * have more then one pipe enabled */ - if (IS_SKYLAKE(dev_priv) && !skl_can_enable_sagv(state)) - skl_disable_sagv(dev_priv); + if (!intel_can_enable_sagv(state)) + intel_disable_sagv(dev_priv); - intel_modeset_verify_disabled(dev); + intel_modeset_verify_disabled(dev, state); } - /* Now enable the clocks, plane, pipe, and connectors that we set up. */ + /* Complete the events for pipes that have now been disabled */ for_each_crtc_in_state(state, crtc, old_crtc_state, i) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool modeset = needs_modeset(crtc->state); - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc->state); - - if (modeset && crtc->state->active) { - update_scanline_offset(to_intel_crtc(crtc)); - dev_priv->display.crtc_enable(crtc); - } /* Complete events for now disable pipes here. */ if (modeset && !crtc->state->active && crtc->state->event) { @@ -13789,21 +14419,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) crtc->state->event = NULL; } - - if (!modeset) - intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); - - if (crtc->state->active && - drm_atomic_get_existing_plane_state(state, crtc->primary)) - intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state)); - - if (crtc->state->active) - drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); - - if (pipe_config->base.active && needs_vblank_wait(pipe_config)) - crtc_vblank_mask |= 1 << i; } + /* Now enable the clocks, plane, pipe, and connectors that we set up. */ + dev_priv->display.update_crtcs(state, &crtc_vblank_mask); + /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. 
To * fix this: @@ -13827,7 +14447,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_cstate = to_intel_crtc_state(crtc->state); if (dev_priv->display.optimize_watermarks) - dev_priv->display.optimize_watermarks(intel_cstate); + dev_priv->display.optimize_watermarks(intel_state, + intel_cstate); } for_each_crtc_in_state(state, crtc, old_crtc_state, i) { @@ -13836,12 +14457,11 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) if (put_domains[i]) modeset_put_power_domains(dev_priv, put_domains[i]); - intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); + intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state); } - if (IS_SKYLAKE(dev_priv) && intel_state->modeset && - skl_can_enable_sagv(state)) - skl_enable_sagv(dev_priv); + if (intel_state->modeset && intel_can_enable_sagv(state)) + intel_enable_sagv(dev_priv); drm_atomic_helper_commit_hw_done(state); @@ -13854,7 +14474,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_commit_cleanup_done(state); - drm_atomic_state_free(state); + drm_atomic_state_put(state); /* As one of the primary mmio accessors, KMS has a high likelihood * of triggering bugs in unclaimed access. After we finish @@ -13872,29 +14492,43 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) static void intel_atomic_commit_work(struct work_struct *work) { - struct drm_atomic_state *state = container_of(work, - struct drm_atomic_state, - commit_work); + struct drm_atomic_state *state = + container_of(work, struct drm_atomic_state, commit_work); + intel_atomic_commit_tail(state); } +static int __i915_sw_fence_call +intel_atomic_commit_ready(struct i915_sw_fence *fence, + enum i915_sw_fence_notify notify) +{ + struct intel_atomic_state *state = + container_of(fence, struct intel_atomic_state, commit_ready); + + switch (notify) { + case FENCE_COMPLETE: + if (state->base.commit_work.func) + queue_work(system_unbound_wq, &state->base.commit_work); + break; + + case FENCE_FREE: + drm_atomic_state_put(&state->base); + break; + } + + return NOTIFY_DONE; +} + static void intel_atomic_track_fbs(struct drm_atomic_state *state) { struct drm_plane_state *old_plane_state; struct drm_plane *plane; - struct drm_i915_gem_object *obj, *old_obj; - struct intel_plane *intel_plane; int i; - mutex_lock(&state->dev->struct_mutex); - for_each_plane_in_state(state, plane, old_plane_state, i) { - obj = intel_fb_obj(plane->state->fb); - old_obj = intel_fb_obj(old_plane_state->fb); - intel_plane = to_intel_plane(plane); - - i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); - } - mutex_unlock(&state->dev->struct_mutex); + for_each_plane_in_state(state, plane, old_plane_state, i) + i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), + intel_fb_obj(plane->state->fb), + to_intel_plane(plane)->frontbuffer_bit); } /** @@ -13906,10 +14540,6 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state) * This function commits a top-level state object that has been validated * with drm_atomic_helper_check(). * - * FIXME: Atomic modeset support for i915 is not yet complete. At the moment - * nonblocking commits are only safe for pure plane updates. Everything else - * should work though. - * * RETURNS * Zero for success or -errno. 
*/ @@ -13921,33 +14551,42 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - if (intel_state->modeset && nonblock) { - DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n"); - return -EINVAL; - } - ret = drm_atomic_helper_setup_commit(state, nonblock); if (ret) return ret; - INIT_WORK(&state->commit_work, intel_atomic_commit_work); + drm_atomic_state_get(state); + i915_sw_fence_init(&intel_state->commit_ready, + intel_atomic_commit_ready); - ret = intel_atomic_prepare_commit(dev, state, nonblock); + ret = intel_atomic_prepare_commit(dev, state); if (ret) { DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); + i915_sw_fence_commit(&intel_state->commit_ready); return ret; } drm_atomic_helper_swap_state(state, true); dev_priv->wm.distrust_bios_wm = false; - dev_priv->wm.skl_results = intel_state->wm_results; intel_shared_dpll_commit(state); intel_atomic_track_fbs(state); - if (nonblock) - queue_work(system_unbound_wq, &state->commit_work); - else + if (intel_state->modeset) { + memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, + sizeof(intel_state->min_pixclk)); + dev_priv->active_crtcs = intel_state->active_crtcs; + dev_priv->atomic_cdclk_freq = intel_state->cdclk; + } + + drm_atomic_state_get(state); + INIT_WORK(&state->commit_work, + nonblock ? intel_atomic_commit_work : NULL); + + i915_sw_fence_commit(&intel_state->commit_ready); + if (!nonblock) { + i915_sw_fence_wait(&intel_state->commit_ready); intel_atomic_commit_tail(state); + } return 0; } @@ -13985,13 +14624,10 @@ retry: goto retry; } - if (ret) out: - drm_atomic_state_free(state); + drm_atomic_state_put(state); } -#undef for_each_intel_crtc_masked - /* * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling * drm_atomic_helper_legacy_gamma_set() directly. @@ -14060,21 +14696,23 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { */ int intel_prepare_plane_fb(struct drm_plane *plane, - const struct drm_plane_state *new_state) + struct drm_plane_state *new_state) { - struct drm_device *dev = plane->dev; + struct intel_atomic_state *intel_state = + to_intel_atomic_state(new_state->state); + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_framebuffer *fb = new_state->fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); - struct reservation_object *resv; - int ret = 0; + int ret; if (!obj && !old_obj) return 0; if (old_obj) { struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc); + drm_atomic_get_existing_crtc_state(new_state->state, + plane->state->crtc); /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the @@ -14087,50 +14725,58 @@ intel_prepare_plane_fb(struct drm_plane *plane, * This should only fail upon a hung GPU, in which case we * can safely continue. 
*/ - if (needs_modeset(crtc_state)) - ret = i915_gem_object_wait_rendering(old_obj, true); - if (ret) { - /* GPU hangs should have been swallowed by the wait */ - WARN_ON(ret == -EIO); - return ret; + if (needs_modeset(crtc_state)) { + ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, + old_obj->resv, NULL, + false, 0, + GFP_KERNEL); + if (ret < 0) + return ret; } } + if (new_state->fence) { /* explicit fencing */ + ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, + new_state->fence, + I915_FENCE_TIMEOUT, + GFP_KERNEL); + if (ret < 0) + return ret; + } + if (!obj) return 0; - /* For framebuffer backed by dmabuf, wait for fence */ - resv = i915_gem_object_get_dmabuf_resv(obj); - if (resv) { - long lret; - - lret = reservation_object_wait_timeout_rcu(resv, false, true, - MAX_SCHEDULE_TIMEOUT); - if (lret == -ERESTARTSYS) - return lret; + if (!new_state->fence) { /* implicit fencing */ + ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, + obj->resv, NULL, + false, I915_FENCE_TIMEOUT, + GFP_KERNEL); + if (ret < 0) + return ret; - WARN(lret < 0, "waiting returns %li\n", lret); + i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); } if (plane->type == DRM_PLANE_TYPE_CURSOR && - INTEL_INFO(dev)->cursor_needs_physical) { - int align = IS_I830(dev) ? 16 * 1024 : 256; + INTEL_INFO(dev_priv)->cursor_needs_physical) { + int align = IS_I830(dev_priv) ? 16 * 1024 : 256; ret = i915_gem_object_attach_phys(obj, align); - if (ret) + if (ret) { DRM_DEBUG_KMS("failed to attach phys object\n"); + return ret; + } } else { - ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation); - } + struct i915_vma *vma; - if (ret == 0) { - struct intel_plane_state *plane_state = - to_intel_plane_state(new_state); - - i915_gem_request_assign(&plane_state->wait_req, - obj->last_write_req); + vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation); + if (IS_ERR(vma)) { + DRM_DEBUG_KMS("failed to pin object\n"); + return PTR_ERR(vma); + } } - return ret; + return 0; } /** @@ -14144,9 +14790,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, */ void intel_cleanup_plane_fb(struct drm_plane *plane, - const struct drm_plane_state *old_state) + struct drm_plane_state *old_state) { - struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct intel_plane_state *old_intel_state; struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); @@ -14157,10 +14803,8 @@ intel_cleanup_plane_fb(struct drm_plane *plane, return; if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || - !INTEL_INFO(dev)->cursor_needs_physical)) + !INTEL_INFO(dev_priv)->cursor_needs_physical)) intel_unpin_fb_obj(old_state->fb, old_state->rotation); - - i915_gem_request_assign(&old_intel_state->wait_req, NULL); } int @@ -14194,13 +14838,14 @@ intel_check_primary_plane(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_crtc *crtc = state->base.crtc; - struct drm_framebuffer *fb = state->base.fb; int min_scale = DRM_PLANE_HELPER_NO_SCALING; int max_scale = DRM_PLANE_HELPER_NO_SCALING; bool can_position = false; + int ret; - if (INTEL_INFO(plane->dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { /* use scaler when colorkey is not required */ if (state->ckey.flags == I915_SET_COLORKEY_NONE) { min_scale = 1; @@ -14209,38 +14854,59 @@ intel_check_primary_plane(struct drm_plane 
*plane, can_position = true; } - return drm_plane_helper_check_update(plane, crtc, fb, &state->src, - &state->dst, &state->clip, - state->base.rotation, - min_scale, max_scale, - can_position, true, - &state->visible); + ret = drm_plane_helper_check_state(&state->base, + &state->clip, + min_scale, max_scale, + can_position, true); + if (ret) + return ret; + + if (!state->base.fb) + return 0; + + if (INTEL_GEN(dev_priv) >= 9) { + ret = skl_check_plane_surface(state); + if (ret) + return ret; + } + + return 0; } static void intel_begin_crtc_commit(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *old_intel_state = + struct intel_crtc_state *intel_cstate = + to_intel_crtc_state(crtc->state); + struct intel_crtc_state *old_intel_cstate = to_intel_crtc_state(old_crtc_state); + struct intel_atomic_state *old_intel_state = + to_intel_atomic_state(old_crtc_state->state); bool modeset = needs_modeset(crtc->state); /* Perform vblank evasion around commit operation */ intel_pipe_update_start(intel_crtc); if (modeset) - return; + goto out; if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { intel_color_set_csc(crtc->state); intel_color_load_luts(crtc->state); } - if (to_intel_crtc_state(crtc->state)->update_pipe) - intel_update_pipe_config(intel_crtc, old_intel_state); - else if (INTEL_INFO(dev)->gen >= 9) + if (intel_cstate->update_pipe) + intel_update_pipe_config(intel_crtc, old_intel_cstate); + else if (INTEL_GEN(dev_priv) >= 9) skl_detach_scalers(intel_crtc); + +out: + if (dev_priv->display.atomic_update_watermarks) + dev_priv->display.atomic_update_watermarks(old_intel_state, + intel_cstate); } static void intel_finish_crtc_commit(struct drm_crtc *crtc, @@ -14260,9 +14926,6 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc, */ void intel_plane_destroy(struct drm_plane *plane) { - if (!plane) - return; - drm_plane_cleanup(plane); kfree(to_intel_plane(plane)); } @@ -14276,53 +14939,63 @@ const struct drm_plane_funcs intel_plane_funcs = { .atomic_set_property = intel_plane_atomic_set_property, .atomic_duplicate_state = intel_plane_duplicate_state, .atomic_destroy_state = intel_plane_destroy_state, - }; -static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, - int pipe) +static struct intel_plane * +intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *primary = NULL; struct intel_plane_state *state = NULL; const uint32_t *intel_primary_formats; + unsigned int supported_rotations; unsigned int num_formats; int ret; primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (!primary) + if (!primary) { + ret = -ENOMEM; goto fail; + } state = intel_create_plane_state(&primary->base); - if (!state) + if (!state) { + ret = -ENOMEM; goto fail; + } + primary->base.state = &state->base; primary->can_scale = false; primary->max_downscale = 1; - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { primary->can_scale = true; state->scaler_id = -1; } primary->pipe = pipe; - primary->plane = pipe; + /* + * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS + * port is hooked to pipe B. Hence we want plane A feeding pipe B. 
+ */ + if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) + primary->plane = (enum plane) !pipe; + else + primary->plane = (enum plane) pipe; primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); primary->check_plane = intel_check_primary_plane; - if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) - primary->plane = !pipe; - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { intel_primary_formats = skl_primary_formats; num_formats = ARRAY_SIZE(skl_primary_formats); primary->update_plane = skylake_update_primary_plane; primary->disable_plane = skylake_disable_primary_plane; - } else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev_priv)) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); primary->update_plane = ironlake_update_primary_plane; primary->disable_plane = i9xx_disable_primary_plane; - } else if (INTEL_INFO(dev)->gen >= 4) { + } else if (INTEL_GEN(dev_priv) >= 4) { intel_primary_formats = i965_primary_formats; num_formats = ARRAY_SIZE(i965_primary_formats); @@ -14336,57 +15009,56 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->disable_plane = i9xx_disable_primary_plane; } - if (INTEL_INFO(dev)->gen >= 9) - ret = drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, + if (INTEL_GEN(dev_priv) >= 9) + ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, + 0, &intel_plane_funcs, intel_primary_formats, num_formats, DRM_PLANE_TYPE_PRIMARY, "plane 1%c", pipe_name(pipe)); - else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) - ret = drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, + else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, + 0, &intel_plane_funcs, intel_primary_formats, num_formats, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else - ret = drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, + ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, + 0, &intel_plane_funcs, intel_primary_formats, num_formats, DRM_PLANE_TYPE_PRIMARY, "plane %c", plane_name(primary->plane)); if (ret) goto fail; - if (INTEL_INFO(dev)->gen >= 4) - intel_create_rotation_property(dev, primary); + if (INTEL_GEN(dev_priv) >= 9) { + supported_rotations = + DRM_ROTATE_0 | DRM_ROTATE_90 | + DRM_ROTATE_180 | DRM_ROTATE_270; + } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { + supported_rotations = + DRM_ROTATE_0 | DRM_ROTATE_180 | + DRM_REFLECT_X; + } else if (INTEL_GEN(dev_priv) >= 4) { + supported_rotations = + DRM_ROTATE_0 | DRM_ROTATE_180; + } else { + supported_rotations = DRM_ROTATE_0; + } + + if (INTEL_GEN(dev_priv) >= 4) + drm_plane_create_rotation_property(&primary->base, + DRM_ROTATE_0, + supported_rotations); drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); - return &primary->base; + return primary; fail: kfree(state); kfree(primary); - return NULL; -} - -void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane) -{ - if (!dev->mode_config.rotation_property) { - unsigned long flags = BIT(DRM_ROTATE_0) | - BIT(DRM_ROTATE_180); - - if (INTEL_INFO(dev)->gen >= 9) - flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270); - - dev->mode_config.rotation_property = - drm_mode_create_rotation_property(dev, flags); - } - if (dev->mode_config.rotation_property) - drm_object_attach_property(&plane->base.base, - dev->mode_config.rotation_property, - plane->base.state->rotation); + return ERR_PTR(ret); } 
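The ->check_plane hooks touched in this area (intel_check_primary_plane() above, intel_check_cursor_plane() below) now hand clipping and scale validation to drm_plane_helper_check_state(), passing scale limits in 16.16 fixed point where DRM_PLANE_HELPER_NO_SCALING corresponds to a 1:1 ratio. The following is a minimal userspace-style sketch of that fixed-point comparison, for illustration only; NO_SCALING and check_scale() are local stand-ins, not the DRM helpers, and the "3 << 16" upper limit is an arbitrary example value rather than the real skl_max_scale() result.

/* Sketch: validate a plane's scale ratio roughly the way the DRM plane
 * helper does it. Source width comes in 16.16 fixed point (as stored in
 * the drm_rect src coordinates), destination width in whole pixels, and
 * the ratio src/dst is compared against [min, max] limits that are also
 * 16.16 values.
 */
#include <stdio.h>

#define NO_SCALING (1 << 16)	/* 1.0 in 16.16 fixed point */

/* Returns the 16.16 scale factor, or -1 if it falls outside [min, max]. */
static int check_scale(int src_w_fp16, int dst_w, int min, int max)
{
	int scale;

	if (dst_w <= 0)
		return -1;

	scale = src_w_fp16 / dst_w;	/* 16.16 / integer -> 16.16 */
	if (scale < min || scale > max)
		return -1;

	return scale;
}

int main(void)
{
	/* 1:1 update with scaling disallowed (the pre-gen9 primary plane case). */
	printf("1:1 strict -> %d\n",
	       check_scale(1920 << 16, 1920, NO_SCALING, NO_SCALING));

	/* A 2x downscale with the same limits is rejected. */
	printf("2:1 strict -> %d\n",
	       check_scale(3840 << 16, 1920, NO_SCALING, NO_SCALING));

	/* The same downscale passes once a larger max is allowed, loosely
	 * mirroring the gen9 "use scaler when no colorkey" path where
	 * min_scale is 1 and max_scale comes from the scaler code. */
	printf("2:1 scaled -> %d\n",
	       check_scale(3840 << 16, 1920, 1, 3 << 16));

	return 0;
}

With these inputs the first call returns 65536 (exactly NO_SCALING), the second returns -1, and the third returns 131072, i.e. a 2.0 downscale factor expressed in 16.16 fixed point.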
static int @@ -14394,19 +15066,17 @@ intel_check_cursor_plane(struct drm_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) { - struct drm_crtc *crtc = crtc_state->base.crtc; struct drm_framebuffer *fb = state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); enum pipe pipe = to_intel_plane(plane)->pipe; unsigned stride; int ret; - ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, - &state->dst, &state->clip, - state->base.rotation, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true, true, &state->visible); + ret = drm_plane_helper_check_state(&state->base, + &state->clip, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true, true); if (ret) return ret; @@ -14415,7 +15085,8 @@ intel_check_cursor_plane(struct drm_plane *plane, return 0; /* Check for which cursor types we support */ - if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) { + if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w, + state->base.crtc_h)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", state->base.crtc_w, state->base.crtc_h); return -EINVAL; @@ -14427,7 +15098,7 @@ intel_check_cursor_plane(struct drm_plane *plane, return -ENOMEM; } - if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { + if (fb->modifier != DRM_FORMAT_MOD_NONE) { DRM_DEBUG_KMS("cursor cannot be tiled\n"); return -EINVAL; } @@ -14442,8 +15113,8 @@ intel_check_cursor_plane(struct drm_plane *plane, * display power well must be turned off and on again. * Refuse the put the cursor into that compromised position. */ - if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && - state->visible && state->base.crtc_x < 0) { + if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C && + state->base.visible && state->base.crtc_x < 0) { DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } @@ -14468,14 +15139,14 @@ intel_update_cursor_plane(struct drm_plane *plane, { struct drm_crtc *crtc = crtc_state->base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_device *dev = plane->dev; + struct drm_i915_private *dev_priv = to_i915(plane->dev); struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); uint32_t addr; if (!obj) addr = 0; - else if (!INTEL_INFO(dev)->cursor_needs_physical) - addr = i915_gem_obj_ggtt_offset(obj); + else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) + addr = i915_gem_object_ggtt_offset(obj, NULL); else addr = obj->phys_handle->busaddr; @@ -14483,20 +15154,25 @@ intel_update_cursor_plane(struct drm_plane *plane, intel_crtc_update_cursor(crtc, state); } -static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, - int pipe) +static struct intel_plane * +intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_plane *cursor = NULL; struct intel_plane_state *state = NULL; int ret; cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); - if (!cursor) + if (!cursor) { + ret = -ENOMEM; goto fail; + } state = intel_create_plane_state(&cursor->base); - if (!state) + if (!state) { + ret = -ENOMEM; goto fail; + } + cursor->base.state = &state->base; cursor->can_scale = false; @@ -14508,8 +15184,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, cursor->update_plane = intel_update_cursor_plane; cursor->disable_plane = intel_disable_cursor_plane; - ret = drm_universal_plane_init(dev, &cursor->base, 0, - &intel_plane_funcs, + ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, + 0, 
&intel_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), DRM_PLANE_TYPE_CURSOR, @@ -14517,102 +15193,106 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, if (ret) goto fail; - if (INTEL_INFO(dev)->gen >= 4) { - if (!dev->mode_config.rotation_property) - dev->mode_config.rotation_property = - drm_mode_create_rotation_property(dev, - BIT(DRM_ROTATE_0) | - BIT(DRM_ROTATE_180)); - if (dev->mode_config.rotation_property) - drm_object_attach_property(&cursor->base.base, - dev->mode_config.rotation_property, - state->base.rotation); - } + if (INTEL_GEN(dev_priv) >= 4) + drm_plane_create_rotation_property(&cursor->base, + DRM_ROTATE_0, + DRM_ROTATE_0 | + DRM_ROTATE_180); - if (INTEL_INFO(dev)->gen >=9) + if (INTEL_GEN(dev_priv) >= 9) state->scaler_id = -1; drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); - return &cursor->base; + return cursor; fail: kfree(state); kfree(cursor); - return NULL; + return ERR_PTR(ret); } -static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state) +static void skl_init_scalers(struct drm_i915_private *dev_priv, + struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) { + struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; int i; - struct intel_scaler *intel_scaler; - struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; - for (i = 0; i < intel_crtc->num_scalers; i++) { - intel_scaler = &scaler_state->scalers[i]; - intel_scaler->in_use = 0; - intel_scaler->mode = PS_SCALER_MODE_DYN; + for (i = 0; i < crtc->num_scalers; i++) { + struct intel_scaler *scaler = &scaler_state->scalers[i]; + + scaler->in_use = 0; + scaler->mode = PS_SCALER_MODE_DYN; } scaler_state->scaler_id = -1; } -static void intel_crtc_init(struct drm_device *dev, int pipe) +static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc; struct intel_crtc_state *crtc_state = NULL; - struct drm_plane *primary = NULL; - struct drm_plane *cursor = NULL; - int ret; + struct intel_plane *primary = NULL; + struct intel_plane *cursor = NULL; + int sprite, ret; intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); - if (intel_crtc == NULL) - return; + if (!intel_crtc) + return -ENOMEM; crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); - if (!crtc_state) + if (!crtc_state) { + ret = -ENOMEM; goto fail; + } intel_crtc->config = crtc_state; intel_crtc->base.state = &crtc_state->base; crtc_state->base.crtc = &intel_crtc->base; /* initialize shared scalers */ - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_GEN(dev_priv) >= 9) { if (pipe == PIPE_C) intel_crtc->num_scalers = 1; else intel_crtc->num_scalers = SKL_NUM_SCALERS; - skl_init_scalers(dev, intel_crtc, crtc_state); + skl_init_scalers(dev_priv, intel_crtc, crtc_state); } - primary = intel_primary_plane_create(dev, pipe); - if (!primary) + primary = intel_primary_plane_create(dev_priv, pipe); + if (IS_ERR(primary)) { + ret = PTR_ERR(primary); goto fail; + } + + for_each_sprite(dev_priv, pipe, sprite) { + struct intel_plane *plane; - cursor = intel_cursor_plane_create(dev, pipe); - if (!cursor) + plane = intel_sprite_plane_create(dev_priv, pipe, sprite); + if (IS_ERR(plane)) { + ret = PTR_ERR(plane); + goto fail; + } + } + + cursor = intel_cursor_plane_create(dev_priv, pipe); + if (IS_ERR(cursor)) { + ret = PTR_ERR(cursor); goto fail; + } - ret = drm_crtc_init_with_planes(dev, 
&intel_crtc->base, primary, - cursor, &intel_crtc_funcs, + ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, + &primary->base, &cursor->base, + &intel_crtc_funcs, "pipe %c", pipe_name(pipe)); if (ret) goto fail; - /* - * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port - * is hooked to pipe B. Hence we want plane A feeding pipe B. - */ intel_crtc->pipe = pipe; - intel_crtc->plane = pipe; - if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) { - DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); - intel_crtc->plane = !pipe; - } + intel_crtc->plane = primary->plane; intel_crtc->cursor_base = ~0; intel_crtc->cursor_cntl = ~0; @@ -14622,21 +15302,26 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); - dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; - dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; + dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; + dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); intel_color_init(&intel_crtc->base); WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); - return; + + return 0; fail: - intel_plane_destroy(primary); - intel_plane_destroy(cursor); + /* + * drm_mode_config_cleanup() will free up any + * crtcs/planes already initialized. + */ kfree(crtc_state); kfree(intel_crtc); + + return ret; } enum pipe intel_get_pipe_from_connector(struct intel_connector *connector) @@ -14686,40 +15371,37 @@ static int intel_encoder_clones(struct intel_encoder *encoder) return index_mask; } -static bool has_edp_a(struct drm_device *dev) +static bool has_edp_a(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - if (!IS_MOBILE(dev)) + if (!IS_MOBILE(dev_priv)) return false; if ((I915_READ(DP_A) & DP_DETECTED) == 0) return false; - if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) + if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) return false; return true; } -static bool intel_crt_present(struct drm_device *dev) +static bool intel_crt_present(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - - if (INTEL_INFO(dev)->gen >= 9) + if (INTEL_GEN(dev_priv) >= 9) return false; - if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) + if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) return false; - if (IS_CHERRYVIEW(dev)) + if (IS_CHERRYVIEW(dev_priv)) return false; - if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) + if (HAS_PCH_LPT_H(dev_priv) && + I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) return false; /* DDI E can't be used if DDI A requires 4 lanes */ - if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) + if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) return false; if (!dev_priv->vbt.int_crt_support) @@ -14728,12 +15410,50 @@ static bool intel_crt_present(struct drm_device *dev) return true; } +void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) +{ + int pps_num; + int pps_idx; + + if (HAS_DDI(dev_priv)) + return; + /* + * This w/a is needed at least on CPT/PPT, but to be sure apply it + * everywhere where registers can be write protected. 
+ */ + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pps_num = 2; + else + pps_num = 1; + + for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { + u32 val = I915_READ(PP_CONTROL(pps_idx)); + + val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; + I915_WRITE(PP_CONTROL(pps_idx), val); + } +} + +static void intel_pps_init(struct drm_i915_private *dev_priv) +{ + if (HAS_PCH_SPLIT(dev_priv) || IS_BROXTON(dev_priv)) + dev_priv->pps_mmio_base = PCH_PPS_BASE; + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->pps_mmio_base = VLV_PPS_BASE; + else + dev_priv->pps_mmio_base = PPS_BASE; + + intel_pps_unlock_regs_wa(dev_priv); +} + static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct intel_encoder *encoder; bool dpd_is_edp = false; + intel_pps_init(dev_priv); + /* * intel_edp_init_connector() depends on this completing first, to * prevent the registeration of both eDP and LVDS and the incorrect @@ -14741,10 +15461,10 @@ static void intel_setup_outputs(struct drm_device *dev) */ intel_lvds_init(dev); - if (intel_crt_present(dev)) + if (intel_crt_present(dev_priv)) intel_crt_init(dev); - if (IS_BROXTON(dev)) { + if (IS_BROXTON(dev_priv)) { /* * FIXME: Broxton doesn't support port detection via the * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to @@ -14755,7 +15475,7 @@ static void intel_setup_outputs(struct drm_device *dev) intel_ddi_init(dev, PORT_C); intel_dsi_init(dev); - } else if (HAS_DDI(dev)) { + } else if (HAS_DDI(dev_priv)) { int found; /* @@ -14765,7 +15485,7 @@ static void intel_setup_outputs(struct drm_device *dev) */ found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; /* WaIgnoreDDIAStrap: skl */ - if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) + if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) intel_ddi_init(dev, PORT_A); /* DDI B, C and D detection is indicated by the SFUSE_STRAP @@ -14781,17 +15501,17 @@ static void intel_setup_outputs(struct drm_device *dev) /* * On SKL we don't have a way to detect DDI-E so we rely on VBT. */ - if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) intel_ddi_init(dev, PORT_E); - } else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev_priv)) { int found; - dpd_is_edp = intel_dp_is_edp(dev, PORT_D); + dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); - if (has_edp_a(dev)) + if (has_edp_a(dev_priv)) intel_dp_init(dev, DP_A, PORT_A); if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { @@ -14814,7 +15534,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_D) & DP_DETECTED) intel_dp_init(dev, PCH_DP_D, PORT_D); - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { bool has_edp, has_port; /* @@ -14832,21 +15552,21 @@ static void intel_setup_outputs(struct drm_device *dev) * trust the port type the VBT declares as we've seen at least * HDMI ports that the VBT claim are DP or eDP. 
*/
*/ - has_edp = intel_dp_is_edp(dev, PORT_B); + has_edp = intel_dp_is_edp(dev_priv, PORT_B); has_port = intel_bios_is_port_present(dev_priv, PORT_B); if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B); if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIB, PORT_B); - has_edp = intel_dp_is_edp(dev, PORT_C); + has_edp = intel_dp_is_edp(dev_priv, PORT_C); has_port = intel_bios_is_port_present(dev_priv, PORT_C); if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C); if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) intel_hdmi_init(dev, VLV_HDMIC, PORT_C); - if (IS_CHERRYVIEW(dev)) { + if (IS_CHERRYVIEW(dev_priv)) { /* * eDP not supported on port D, * so no need to worry about it @@ -14859,18 +15579,18 @@ static void intel_setup_outputs(struct drm_device *dev) } intel_dsi_init(dev); - } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) { + } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { bool found = false; if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { DRM_DEBUG_KMS("probing SDVOB\n"); found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B); - if (!found && IS_G4X(dev)) { + if (!found && IS_G4X(dev_priv)) { DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); } - if (!found && IS_G4X(dev)) + if (!found && IS_G4X(dev_priv)) intel_dp_init(dev, DP_B, PORT_B); } @@ -14883,21 +15603,20 @@ static void intel_setup_outputs(struct drm_device *dev) if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); } - if (IS_G4X(dev)) + if (IS_G4X(dev_priv)) intel_dp_init(dev, DP_C, PORT_C); } - if (IS_G4X(dev) && - (I915_READ(DP_D) & DP_DETECTED)) + if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) intel_dp_init(dev, DP_D, PORT_D); - } else if (IS_GEN2(dev)) + } else if (IS_GEN2(dev_priv)) intel_dvo_init(dev); - if (SUPPORTS_TV(dev)) + if (SUPPORTS_TV(dev_priv)) intel_tv_init(dev); intel_psr_init(dev); @@ -14921,7 +15640,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) drm_framebuffer_cleanup(fb); mutex_lock(&dev->struct_mutex); WARN_ON(!intel_fb->obj->framebuffer_references--); - drm_gem_object_unreference(&intel_fb->obj->base); + i915_gem_object_put(intel_fb->obj); mutex_unlock(&dev->struct_mutex); kfree(intel_fb); } @@ -14952,6 +15671,8 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, struct drm_i915_gem_object *obj = intel_fb->obj; mutex_lock(&dev->struct_mutex); + if (obj->pin_display && obj->cache_dirty) + i915_gem_clflush_object(obj, true); intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); mutex_unlock(&dev->struct_mutex); @@ -14965,10 +15686,10 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { }; static -u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, - uint32_t pixel_format) +u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, + uint64_t fb_modifier, uint32_t pixel_format) { - u32 gen = INTEL_INFO(dev)->gen; + u32 gen = INTEL_INFO(dev_priv)->gen; if (gen >= 9) { int cpp = drm_format_plane_cpp(pixel_format, 0); @@ -14977,7 +15698,8 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, * pixels and 32K bytes." 
*/ return min(8192 * cpp, 32768); - } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) && + !IS_CHERRYVIEW(dev_priv)) { return 32*1024; } else if (gen >= 4) { if (fb_modifier == I915_FORMAT_MOD_X_TILED) @@ -15001,24 +15723,27 @@ static int intel_framebuffer_init(struct drm_device *dev, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(dev); - unsigned int aligned_height; + unsigned int tiling = i915_gem_object_get_tiling(obj); int ret; u32 pitch_limit, stride_alignment; + struct drm_format_name_buf format_name; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { - /* Enforce that fb modifier and tiling mode match, but only for - * X-tiled. This is needed for FBC. */ - if (!!(obj->tiling_mode == I915_TILING_X) != - !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) { + /* + * If there's a fence, enforce that + * the fb modifier and tiling mode match. + */ + if (tiling != I915_TILING_NONE && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { DRM_DEBUG("tiling_mode doesn't match fb modifier\n"); return -EINVAL; } } else { - if (obj->tiling_mode == I915_TILING_X) + if (tiling == I915_TILING_X) { mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; - else if (obj->tiling_mode == I915_TILING_Y) { + } else if (tiling == I915_TILING_Y) { DRM_DEBUG("No Y tiling for legacy addfb\n"); return -EINVAL; } @@ -15028,7 +15753,7 @@ static int intel_framebuffer_init(struct drm_device *dev, switch (mode_cmd->modifier[0]) { case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: - if (INTEL_INFO(dev)->gen < 9) { + if (INTEL_GEN(dev_priv) < 9) { DRM_DEBUG("Unsupported tiling 0x%llx!\n", mode_cmd->modifier[0]); return -EINVAL; @@ -15042,6 +15767,16 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } + /* + * gen2/3 display engine uses the fence if present, + * so the tiling mode must match the fb modifier exactly. + */ + if (INTEL_INFO(dev_priv)->gen < 4 && + tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { + DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n"); + return -EINVAL; + } + stride_alignment = intel_fb_stride_alignment(dev_priv, mode_cmd->modifier[0], mode_cmd->pixel_format); @@ -15051,7 +15786,7 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } - pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0], + pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0], mode_cmd->pixel_format); if (mode_cmd->pitches[0] > pitch_limit) { DRM_DEBUG("%s pitch (%u) must be at less than %d\n", @@ -15061,10 +15796,15 @@ static int intel_framebuffer_init(struct drm_device *dev, return -EINVAL; } - if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED && - mode_cmd->pitches[0] != obj->stride) { + /* + * If there's a fence, enforce that + * the fb pitch and fence stride match. 
+ */ + if (tiling != I915_TILING_NONE && + mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) { DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", - mode_cmd->pitches[0], obj->stride); + mode_cmd->pitches[0], + i915_gem_object_get_stride(obj)); return -EINVAL; } @@ -15076,33 +15816,33 @@ static int intel_framebuffer_init(struct drm_device *dev, case DRM_FORMAT_ARGB8888: break; case DRM_FORMAT_XRGB1555: - if (INTEL_INFO(dev)->gen > 3) { + if (INTEL_GEN(dev_priv) > 3) { DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; } break; case DRM_FORMAT_ABGR8888: - if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && - INTEL_INFO(dev)->gen < 9) { + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && + INTEL_GEN(dev_priv) < 9) { DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; } break; case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: - if (INTEL_INFO(dev)->gen < 4) { + if (INTEL_GEN(dev_priv) < 4) { DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; } break; case DRM_FORMAT_ABGR2101010: - if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { + if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; } break; @@ -15110,15 +15850,15 @@ static int intel_framebuffer_init(struct drm_device *dev, case DRM_FORMAT_UYVY: case DRM_FORMAT_YVYU: case DRM_FORMAT_VYUY: - if (INTEL_INFO(dev)->gen < 5) { + if (INTEL_GEN(dev_priv) < 5) { DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; } break; default: DRM_DEBUG("unsupported pixel format: %s\n", - drm_get_format_name(mode_cmd->pixel_format)); + drm_get_format_name(mode_cmd->pixel_format, &format_name)); return -EINVAL; } @@ -15126,17 +15866,12 @@ static int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->offsets[0] != 0) return -EINVAL; - aligned_height = intel_fb_align_height(dev, mode_cmd->height, - mode_cmd->pixel_format, - mode_cmd->modifier[0]); - /* FIXME drm helper for size checks (especially planar formats)? 
*/ - if (obj->base.size < aligned_height * mode_cmd->pitches[0]) - return -EINVAL; - drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; - intel_fill_fb_info(dev_priv, &intel_fb->base); + ret = intel_fill_fb_info(dev_priv, &intel_fb->base); + if (ret) + return ret; ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { @@ -15158,23 +15893,17 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_i915_gem_object *obj; struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; - obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0])); - if (&obj->base == NULL) + obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); + if (!obj) return ERR_PTR(-ENOENT); fb = intel_framebuffer_create(dev, &mode_cmd, obj); if (IS_ERR(fb)) - drm_gem_object_unreference_unlocked(&obj->base); + i915_gem_object_put(obj); return fb; } -#ifndef CONFIG_DRM_FBDEV_EMULATION -static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) -{ -} -#endif - static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, .output_poll_changed = intel_fbdev_output_poll_changed, @@ -15347,6 +16076,11 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) skl_modeset_calc_cdclk; } + if (dev_priv->info.gen >= 9) + dev_priv->display.update_crtcs = skl_update_crtcs; + else + dev_priv->display.update_crtcs = intel_update_crtcs; + switch (INTEL_INFO(dev_priv)->gen) { case 2: dev_priv->display.queue_flip = intel_gen2_queue_flip; @@ -15545,18 +16279,18 @@ static void intel_init_quirks(struct drm_device *dev) } /* Disable the VGA plane that we never use */ -static void i915_disable_vga(struct drm_device *dev) +static void i915_disable_vga(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct pci_dev *pdev = dev_priv->drm.pdev; u8 sr1; - i915_reg_t vga_reg = i915_vgacntrl_reg(dev); + i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ - vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); + vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); outb(SR01, VGA_SR_INDEX); sr1 = inb(VGA_SR_DATA); outb(sr1 | 1<<5, VGA_SR_DATA); - vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); + vga_put(pdev, VGA_RSRC_LEGACY_IO); udelay(300); I915_WRITE(vga_reg, VGA_DISP_DISABLE); @@ -15567,12 +16301,11 @@ void intel_modeset_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - intel_update_cdclk(dev); + intel_update_cdclk(dev_priv); dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; - intel_init_clock_gating(dev); - intel_enable_gt_powersave(dev_priv); + intel_init_clock_gating(dev_priv); } /* @@ -15589,6 +16322,7 @@ static void sanitize_watermarks(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *state; + struct intel_atomic_state *intel_state; struct drm_crtc *crtc; struct drm_crtc_state *cstate; struct drm_modeset_acquire_ctx ctx; @@ -15617,12 +16351,14 @@ retry: if (WARN_ON(IS_ERR(state))) goto fail; + intel_state = to_intel_atomic_state(state); + /* * Hardware readout is the only time we don't want to calculate * intermediate watermarks (since we don't trust the current * watermarks). */ - to_intel_atomic_state(state)->skip_intermediate_wm = true; + intel_state->skip_intermediate_wm = true; ret = intel_atomic_check(dev, state); if (ret) { @@ -15638,7 +16374,7 @@ retry: * BIOS-programmed watermarks untouched and hope for the best. 
*/ WARN(true, "Could not determine valid watermarks for inherited state\n"); - goto fail; + goto put_state; } /* Write calculated watermark values back */ @@ -15646,20 +16382,20 @@ retry: struct intel_crtc_state *cs = to_intel_crtc_state(cstate); cs->wm.need_postvbl_update = true; - dev_priv->display.optimize_watermarks(cs); + dev_priv->display.optimize_watermarks(intel_state, cs); } - drm_atomic_state_free(state); +put_state: + drm_atomic_state_put(state); fail: drm_modeset_drop_locks(&ctx); drm_modeset_acquire_fini(&ctx); } -void intel_modeset_init(struct drm_device *dev) +int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; - int sprite, ret; enum pipe pipe; struct intel_crtc *crtc; @@ -15677,10 +16413,10 @@ void intel_modeset_init(struct drm_device *dev) intel_init_quirks(dev); - intel_init_pm(dev); + intel_init_pm(dev_priv); - if (INTEL_INFO(dev)->num_pipes == 0) - return; + if (INTEL_INFO(dev_priv)->num_pipes == 0) + return 0; /* * There may be no VBT; and if the BIOS enabled SSC we can @@ -15688,7 +16424,7 @@ void intel_modeset_init(struct drm_device *dev) * BIOS isn't using it, don't assume it will work even if the VBT * indicates as much. */ - if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & DREF_SSC1_ENABLE); @@ -15700,10 +16436,10 @@ void intel_modeset_init(struct drm_device *dev) } } - if (IS_GEN2(dev)) { + if (IS_GEN2(dev_priv)) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; - } else if (IS_GEN3(dev)) { + } else if (IS_GEN3(dev_priv)) { dev->mode_config.max_width = 4096; dev->mode_config.max_height = 4096; } else { @@ -15711,10 +16447,10 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.max_height = 8192; } - if (IS_845G(dev) || IS_I865G(dev)) { - dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512; + if (IS_845G(dev_priv) || IS_I865G(dev_priv)) { + dev->mode_config.cursor_width = IS_845G(dev_priv) ? 64 : 512; dev->mode_config.cursor_height = 1023; - } else if (IS_GEN2(dev)) { + } else if (IS_GEN2(dev_priv)) { dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; } else { @@ -15725,29 +16461,30 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.fb_base = ggtt->mappable_base; DRM_DEBUG_KMS("%d display pipe%s available.\n", - INTEL_INFO(dev)->num_pipes, - INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); + INTEL_INFO(dev_priv)->num_pipes, + INTEL_INFO(dev_priv)->num_pipes > 1 ? 
"s" : ""); for_each_pipe(dev_priv, pipe) { - intel_crtc_init(dev, pipe); - for_each_sprite(dev_priv, pipe, sprite) { - ret = intel_plane_init(dev, pipe, sprite); - if (ret) - DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", - pipe_name(pipe), sprite_name(pipe, sprite), ret); + int ret; + + ret = intel_crtc_init(dev_priv, pipe); + if (ret) { + drm_mode_config_cleanup(dev); + return ret; } } intel_update_czclk(dev_priv); - intel_update_cdclk(dev); + intel_update_cdclk(dev_priv); + dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; intel_shared_dpll_init(dev); if (dev_priv->max_cdclk_freq == 0) - intel_update_max_cdclk(dev); + intel_update_max_cdclk(dev_priv); /* Just disable it once at startup */ - i915_disable_vga(dev); + i915_disable_vga(dev_priv); intel_setup_outputs(dev); drm_modeset_lock_all(dev); @@ -15783,6 +16520,8 @@ void intel_modeset_init(struct drm_device *dev) * since the watermark calculation done here will use pstate->fb. */ sanitize_watermarks(dev); + + return 0; } static void intel_enable_pipe_a(struct drm_device *dev) @@ -15812,11 +16551,10 @@ static void intel_enable_pipe_a(struct drm_device *dev) static bool intel_check_plane_mapping(struct intel_crtc *crtc) { - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); u32 val; - if (INTEL_INFO(dev)->num_pipes == 1) + if (INTEL_INFO(dev_priv)->num_pipes == 1) return true; val = I915_READ(DSPCNTR(!crtc->plane)); @@ -15839,15 +16577,22 @@ static bool intel_crtc_has_encoders(struct intel_crtc *crtc) return false; } -static bool intel_encoder_has_connectors(struct intel_encoder *encoder) +static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct intel_connector *connector; for_each_connector_on_encoder(dev, &encoder->base, connector) - return true; + return connector; - return false; + return NULL; +} + +static bool has_pch_trancoder(struct drm_i915_private *dev_priv, + enum transcoder pch_transcoder) +{ + return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || + (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); } static void intel_sanitize_crtc(struct intel_crtc *crtc) @@ -15883,7 +16628,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) /* We need to sanitize the plane -> pipe mapping first because this will * disable the crtc (and hence change the state) if it is wrong. Note * that gen4+ has a fixed plane -> pipe mapping. */ - if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { + if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) { bool plane; DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", @@ -15893,7 +16638,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * Temporarily change the plane mapping and disable everything * ... */ plane = crtc->plane; - to_intel_plane_state(crtc->base.primary->state)->visible = true; + to_intel_plane_state(crtc->base.primary->state)->base.visible = true; crtc->plane = !plane; intel_crtc_disable_noatomic(&crtc->base); crtc->plane = plane; @@ -15913,7 +16658,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) if (crtc->active && !intel_crtc_has_encoders(crtc)) intel_crtc_disable_noatomic(&crtc->base); - if (crtc->active || HAS_GMCH_DISPLAY(dev)) { + if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { /* * We start out with underrun reporting disabled to avoid races. * For correct bookkeeping mark this on active crtcs. 
@@ -15928,14 +16673,23 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) * worst a fifo underrun happens which also sets this to false. */ crtc->cpu_fifo_underrun_disabled = true; - crtc->pch_fifo_underrun_disabled = true; + /* + * We track the PCH trancoder underrun reporting state + * within the crtc. With crtc for pipe A housing the underrun + * reporting state for PCH transcoder A, crtc for pipe B housing + * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, + * and marking underrun reporting as disabled for the non-existing + * PCH transcoders B and C would prevent enabling the south + * error interrupt (see cpt_can_enable_serr_int()). + */ + if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe)) + crtc->pch_fifo_underrun_disabled = true; } } static void intel_sanitize_encoder(struct intel_encoder *encoder) { struct intel_connector *connector; - struct drm_device *dev = encoder->base.dev; /* We need to check both for a crtc link (meaning that the * encoder is active and trying to read from a pipe) and the @@ -15943,7 +16697,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) bool has_active_crtc = encoder->base.crtc && to_intel_crtc(encoder->base.crtc)->active; - if (intel_encoder_has_connectors(encoder) && !has_active_crtc) { + connector = intel_encoder_find_connector(encoder); + if (connector && !has_active_crtc) { DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", encoder->base.base.id, encoder->base.name); @@ -15952,12 +16707,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) * fallout from our resume register restoring. Disable * the encoder manually again. */ if (encoder->base.crtc) { + struct drm_crtc_state *crtc_state = encoder->base.crtc->state; + DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", encoder->base.base.id, encoder->base.name); - encoder->disable(encoder); + encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); if (encoder->post_disable) - encoder->post_disable(encoder); + encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); } encoder->base.crtc = NULL; @@ -15965,32 +16722,26 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) * a bug in one of the get_hw_state functions. Or someplace else * in our code, like the register restore mess on resume. Clamp * things to off as a safer default. */ - for_each_intel_connector(dev, connector) { - if (connector->encoder != encoder) - continue; - connector->base.dpms = DRM_MODE_DPMS_OFF; - connector->base.encoder = NULL; - } + + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; } /* Enabled encoders without active connectors will be fixed in * the crtc fixup. 
*/ } -void i915_redisable_vga_power_on(struct drm_device *dev) +void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - i915_reg_t vga_reg = i915_vgacntrl_reg(dev); + i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); - i915_disable_vga(dev); + i915_disable_vga(dev_priv); } } -void i915_redisable_vga(struct drm_device *dev) +void i915_redisable_vga(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = to_i915(dev); - /* This function can be called both from intel_modeset_setup_hw_state or * at a very early point in our resume sequence, where the power well * structures are not yet restored. Since this function is at a very @@ -16001,7 +16752,7 @@ void i915_redisable_vga(struct drm_device *dev) if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) return; - i915_redisable_vga_power_on(dev); + i915_redisable_vga_power_on(dev_priv); intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); } @@ -16020,10 +16771,10 @@ static void readout_plane_state(struct intel_crtc *crtc) struct intel_plane_state *plane_state = to_intel_plane_state(primary->state); - plane_state->visible = crtc->active && + plane_state->base.visible = crtc->active && primary_get_hw_state(to_intel_plane(primary)); - if (plane_state->visible) + if (plane_state->base.visible) crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); } @@ -16040,7 +16791,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = crtc->config; - int pixclk = 0; __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); memset(crtc_state, 0, sizeof(*crtc_state)); @@ -16052,28 +16802,14 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->base.enabled = crtc_state->base.enable; crtc->active = crtc_state->base.active; - if (crtc_state->base.active) { + if (crtc_state->base.active) dev_priv->active_crtcs |= 1 << crtc->pipe; - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - pixclk = ilk_pipe_pixel_rate(crtc_state); - else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - pixclk = crtc_state->base.adjusted_mode.crtc_clock; - else - WARN_ON(dev_priv->display.modeset_calc_cdclk); - - /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) - pixclk = DIV_ROUND_UP(pixclk * 100, 95); - } - - dev_priv->min_pixclk[crtc->pipe] = pixclk; - readout_plane_state(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, - crtc->active ? "enabled" : "disabled"); + enableddisabled(crtc->active)); } for (i = 0; i < dev_priv->num_shared_dpll; i++) { @@ -16096,7 +16832,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe = 0; if (encoder->get_hw_state(encoder, &pipe)) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + encoder->base.crtc = &crtc->base; crtc->config->output_types |= 1 << encoder->type; encoder->get_config(encoder, crtc->config); @@ -16105,9 +16842,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) } DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", - encoder->base.base.id, - encoder->base.name, - encoder->base.crtc ? 
"enabled" : "disabled", + encoder->base.base.id, encoder->base.name, + enableddisabled(encoder->base.crtc), pipe_name(pipe)); } @@ -16136,12 +16872,13 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) connector->base.encoder = NULL; } DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", - connector->base.base.id, - connector->base.name, - connector->base.encoder ? "enabled" : "disabled"); + connector->base.base.id, connector->base.name, + enableddisabled(connector->base.encoder)); } for_each_intel_crtc(dev, crtc) { + int pixclk = 0; + crtc->base.hwmode = crtc->config->base.adjusted_mode; memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); @@ -16169,10 +16906,23 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) */ crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED; + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) + pixclk = ilk_pipe_pixel_rate(crtc->config); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + pixclk = crtc->config->base.adjusted_mode.crtc_clock; + else + WARN_ON(dev_priv->display.modeset_calc_cdclk); + + /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ + if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled) + pixclk = DIV_ROUND_UP(pixclk * 100, 95); + drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); update_scanline_offset(crtc); } + dev_priv->min_pixclk[crtc->pipe] = pixclk; + intel_pipe_config_sanity_check(dev_priv, crtc->config); } } @@ -16197,7 +16947,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev) } for_each_pipe(dev_priv, pipe) { - crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + intel_sanitize_crtc(crtc); intel_dump_pipe_config(crtc, crtc->config, "[setup_hw_state]"); @@ -16217,11 +16968,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev) pll->on = false; } - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) vlv_wm_get_hw_state(dev); - else if (IS_GEN9(dev)) + else if (IS_GEN9(dev_priv)) skl_wm_get_hw_state(dev); - else if (HAS_PCH_SPLIT(dev)) + else if (HAS_PCH_SPLIT(dev_priv)) ilk_wm_get_hw_state(dev); for_each_intel_crtc(dev, crtc) { @@ -16271,10 +17022,9 @@ void intel_display_resume(struct drm_device *dev) drm_modeset_acquire_fini(&ctx); mutex_unlock(&dev->mode_config.mutex); - if (ret) { + if (ret) DRM_ERROR("Restoring old state failed with %i\n", ret); - drm_atomic_state_free(state); - } + drm_atomic_state_put(state); } void intel_modeset_gem_init(struct drm_device *dev) @@ -16282,7 +17032,6 @@ void intel_modeset_gem_init(struct drm_device *dev) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *c; struct drm_i915_gem_object *obj; - int ret; intel_init_gt_powersave(dev_priv); @@ -16296,15 +17045,17 @@ void intel_modeset_gem_init(struct drm_device *dev) * for this. 
*/ for_each_crtc(dev, c) { + struct i915_vma *vma; + obj = intel_fb_obj(c->primary->fb); if (obj == NULL) continue; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(c->primary->fb, + vma = intel_pin_and_fence_fb_obj(c->primary->fb, c->primary->state->rotation); mutex_unlock(&dev->struct_mutex); - if (ret) { + if (IS_ERR(vma)) { DRM_ERROR("failed to pin boot fb on pipe %d\n", to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); @@ -16385,10 +17136,9 @@ void intel_connector_attach_encoder(struct intel_connector *connector, /* * set vga decode state - true == enable VGA decode */ -int intel_modeset_vga_set_state(struct drm_device *dev, bool state) +int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state) { - struct drm_i915_private *dev_priv = to_i915(dev); - unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; + unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; u16 gmch_ctrl; if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { @@ -16412,6 +17162,8 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state) return 0; } +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + struct intel_display_error_state { u32 power_well_driver; @@ -16540,17 +17292,16 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) void intel_display_print_error_state(struct drm_i915_error_state_buf *m, - struct drm_device *dev, + struct drm_i915_private *dev_priv, struct intel_display_error_state *error) { - struct drm_i915_private *dev_priv = to_i915(dev); int i; if (!error) return; - err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) err_printf(m, "PWR_WELL_CTL2: %08x\n", error->power_well_driver); for_each_pipe(dev_priv, i) { @@ -16563,13 +17314,13 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, "Plane [%d]:\n", i); err_printf(m, " CNTR: %08x\n", error->plane[i].control); err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); - if (INTEL_INFO(dev)->gen <= 3) { + if (INTEL_GEN(dev_priv) <= 3) { err_printf(m, " SIZE: %08x\n", error->plane[i].size); err_printf(m, " POS: %08x\n", error->plane[i].pos); } - if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) + if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) err_printf(m, " ADDR: %08x\n", error->plane[i].addr); - if (INTEL_INFO(dev)->gen >= 4) { + if (INTEL_GEN(dev_priv) >= 4) { err_printf(m, " SURF: %08x\n", error->plane[i].surface); err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); } @@ -16594,3 +17345,5 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); } } + +#endif |
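A minimal sketch (not part of the commit) of the framebuffer/tiling consistency rules the reworked intel_framebuffer_init() hunks above enforce, assuming the helpers they reference (intel_fb_modifier_to_tiling(), i915_gem_object_get_tiling(), i915_gem_object_get_stride()); example_fb_tiling_checks() is a hypothetical name and the DRM_DEBUG messages and Y/Yf-vs-gen9 modifier check are omitted for brevity:

/*
 * Hypothetical helper condensing the checks added above: a fenced
 * (tiled) object must agree with the requested fb modifier, legacy
 * addfb derives the modifier from the fence, gen2/3 require an exact
 * match, and a tiled fb's pitch must equal the fence stride.
 */
static int example_fb_tiling_checks(struct drm_i915_private *dev_priv,
				    struct drm_mode_fb_cmd2 *mode_cmd,
				    struct drm_i915_gem_object *obj)
{
	unsigned int tiling = i915_gem_object_get_tiling(obj);

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* If there's a fence, the fb modifier must match it. */
		if (tiling != I915_TILING_NONE &&
		    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0]))
			return -EINVAL;
	} else {
		/* Legacy addfb: derive the modifier from the fence; no Y tiling. */
		if (tiling == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (tiling == I915_TILING_Y)
			return -EINVAL;
	}

	/*
	 * gen2/3 display engines scan out through the fence if present,
	 * so the tiling mode must match the fb modifier exactly.
	 */
	if (INTEL_GEN(dev_priv) < 4 &&
	    tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0]))
		return -EINVAL;

	/* With a fence, the fb pitch must equal the fence stride. */
	if (tiling != I915_TILING_NONE &&
	    mode_cmd->pitches[0] != i915_gem_object_get_stride(obj))
		return -EINVAL;

	return 0;
}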
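Similarly, a rough sketch of the per-CRTC min_pixclk bookkeeping that intel_modeset_readout_hw_state() now performs, including the 100/95 scaling that keeps the pixel rate within 95% of cdclk when IPS is enabled on Broadwell; example_readout_min_pixclk() is a hypothetical name:

/*
 * Hypothetical helper restating the readout logic above: inactive
 * pipes contribute 0, gen9+/BDW use the pipe pixel rate, VLV/CHV use
 * the adjusted mode clock, and BDW with IPS enabled scales the value
 * by 100/95 before it is stored in dev_priv->min_pixclk[].
 */
static int example_readout_min_pixclk(struct drm_i915_private *dev_priv,
				      struct intel_crtc_state *crtc_state)
{
	int pixclk = 0;

	if (!crtc_state->base.active)
		return 0;

	if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		pixclk = ilk_pipe_pixel_rate(crtc_state);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pixclk = crtc_state->base.adjusted_mode.crtc_clock;
	else
		/* other platforms have no fixed cdclk/pixclk relationship */
		WARN_ON(dev_priv->display.modeset_calc_cdclk);

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
		pixclk = DIV_ROUND_UP(pixclk * 100, 95);

	return pixclk;
}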