Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/dml')
5 files changed, 116 insertions, 31 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index d2b89c50be2a..197df404761a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -26,7 +26,6 @@
 #include "resource.h"
 #include "clk_mgr.h"
-#include "dc_link_dp.h"
 #include "dchubbub.h"
 #include "dcn20/dcn20_resource.h"
 #include "dcn21/dcn21_resource.h"
@@ -1004,6 +1003,39 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
     }
 }
 
+static void dcn20_adjust_freesync_v_startup(
+        const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+    struct dc_crtc_timing patched_crtc_timing;
+    uint32_t asic_blank_end = 0;
+    uint32_t asic_blank_start = 0;
+    uint32_t newVstartup = 0;
+
+    patched_crtc_timing = *dc_crtc_timing;
+
+    if (patched_crtc_timing.flags.INTERLACE == 1) {
+        if (patched_crtc_timing.v_front_porch < 2)
+            patched_crtc_timing.v_front_porch = 2;
+    } else {
+        if (patched_crtc_timing.v_front_porch < 1)
+            patched_crtc_timing.v_front_porch = 1;
+    }
+
+    /* blank_start = frame end - front porch */
+    asic_blank_start = patched_crtc_timing.v_total -
+            patched_crtc_timing.v_front_porch;
+
+    /* blank_end = blank_start - active */
+    asic_blank_end = asic_blank_start -
+            patched_crtc_timing.v_border_bottom -
+            patched_crtc_timing.v_addressable -
+            patched_crtc_timing.v_border_top;
+
+    newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+    *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
 void dcn20_calculate_dlg_params(
         struct dc *dc, struct dc_state *context,
         display_e2e_pipe_params_st *pipes,
@@ -1063,6 +1095,11 @@ void dcn20_calculate_dlg_params(
         context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
         context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
 
+        if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+            dcn20_adjust_freesync_v_startup(
+                &context->res_ctx.pipe_ctx[i].stream->timing,
+                &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+
         pipe_idx++;
     }
     /*save a original dppclock copy*/
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 7feb8759e475..0dc1a03999b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -24,7 +24,6 @@
  *
  */
 #include "dcn32_fpu.h"
-#include "dc_link_dp.h"
 #include "dcn32/dcn32_resource.h"
 #include "dcn20/dcn20_resource.h"
 #include "display_mode_vba_util_32.h"
@@ -692,7 +691,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
         * to combine this with SubVP can cause issues with the scheduling).
         * - Not TMZ surface
         */
-        if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+        if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) &&
            pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
            (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
            (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
@@ -1272,6 +1271,38 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
     return false;
 }
 
+static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+    struct dc_crtc_timing patched_crtc_timing;
+    uint32_t asic_blank_end = 0;
+    uint32_t asic_blank_start = 0;
+    uint32_t newVstartup = 0;
+
+    patched_crtc_timing = *dc_crtc_timing;
+
+    if (patched_crtc_timing.flags.INTERLACE == 1) {
+        if (patched_crtc_timing.v_front_porch < 2)
+            patched_crtc_timing.v_front_porch = 2;
+    } else {
+        if (patched_crtc_timing.v_front_porch < 1)
+            patched_crtc_timing.v_front_porch = 1;
+    }
+
+    /* blank_start = frame end - front porch */
+    asic_blank_start = patched_crtc_timing.v_total -
+            patched_crtc_timing.v_front_porch;
+
+    /* blank_end = blank_start - active */
+    asic_blank_end = asic_blank_start -
+            patched_crtc_timing.v_border_bottom -
+            patched_crtc_timing.v_addressable -
+            patched_crtc_timing.v_border_top;
+
+    newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+    *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
 static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
                     display_e2e_pipe_params_st *pipes,
                     int pipe_cnt, int vlevel)
@@ -1375,6 +1406,11 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
             }
         }
 
+        if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+            dcn20_adjust_freesync_v_startup(
+                &context->res_ctx.pipe_ctx[i].stream->timing,
+                &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+
         pipe_idx++;
     }
     /* If DCN isn't making memory requests we can allow pstate change and lower clocks */
@@ -1601,16 +1637,12 @@ bool dcn32_internal_validate_bw(struct dc *dc,
         * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
         */
        context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
-            dm_prefetch_support_fclk_and_stutter;
+            dm_prefetch_support_none;
 
+        context->bw_ctx.dml.validate_max_state = fast_validate;
         vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
 
-        /* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */
-        if (vlevel == context->bw_ctx.dml.soc.num_states) {
-            context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
-                dm_prefetch_support_stutter;
-            vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
-        }
+        context->bw_ctx.dml.validate_max_state = false;
 
     if (vlevel < context->bw_ctx.dml.soc.num_states) {
         memset(split, 0, sizeof(split));
@@ -2499,8 +2531,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
     }
 
     /* Override from VBIOS for num_chan */
-    if (dc->ctx->dc_bios->vram_info.num_chans)
+    if (dc->ctx->dc_bios->vram_info.num_chans) {
         dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+        dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+            dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+    }
 
     if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
         dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 6c5ab5c26b38..23e50d15e9ae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -1639,9 +1639,14 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
 static void mode_support_configuration(struct vba_vars_st *v,
                       struct display_mode_lib *mode_lib)
 {
-    int i, j;
+    int i, j, start_state;
 
-    for (i = v->soc.num_states - 1; i >= 0; i--) {
+    if (mode_lib->validate_max_state)
+        start_state = v->soc.num_states - 1;
+    else
+        start_state = 0;
+
+    for (i = v->soc.num_states - 1; i >= start_state; i--) {
         for (j = 0; j < 2; j++) {
             if (mode_lib->vba.ScaleRatioAndTapsSupport == true
                     && mode_lib->vba.SourceFormatPixelAndScanSupport == true
@@ -1710,7 +1715,7 @@ static void mode_support_configuration(struct vba_vars_st *v,
 void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
 {
     struct vba_vars_st *v = &mode_lib->vba;
-    int i, j;
+    int i, j, start_state;
     unsigned int k, m;
     unsigned int MaximumMPCCombine;
     unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth;
@@ -1723,6 +1728,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
 #endif
 
     /*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+    if (mode_lib->validate_max_state)
+        start_state = v->soc.num_states - 1;
+    else
+        start_state = 0;
 
     /*Scale Ratio, taps Support Check*/
 
@@ -2012,7 +2021,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
     mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage &&
             v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible;
 
-    for (i = 0; i < v->soc.num_states; i++) {
+    for (i = start_state; i < v->soc.num_states; i++) {
         for (j = 0; j < 2; j++) {
             mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0;
             mode_lib->vba.TotalAvailablePipesSupport[i][j] = true;
@@ -2289,7 +2298,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
         }
     }
 
-    for (i = 0; i < v->soc.num_states; ++i) {
+    for (i = start_state; i < v->soc.num_states; ++i) {
         mode_lib->vba.ExceededMultistreamSlots[i] = false;
         for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
             if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) {
@@ -2389,7 +2398,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
         }
     }
 
-    for (i = 0; i < v->soc.num_states; ++i) {
+    for (i = start_state; i < v->soc.num_states; ++i) {
         mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false;
         for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
             if (mode_lib->vba.BlendingAndTiming[k] == k
@@ -2406,7 +2415,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
         }
     }
 
-    for (i = 0; i < v->soc.num_states; ++i) {
+    for (i = start_state; i < v->soc.num_states; ++i) {
         mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true;
         mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true;
         for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
@@ -2424,7 +2433,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
         }
     }
 
-    for (i = 0; i < v->soc.num_states; i++) {
+    for (i = start_state; i < v->soc.num_states; i++) {
         mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
         for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
             if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2461,7 +2470,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
     /* Check DSC Unit and Slices Support */
     v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
 
-    for (i = 0; i < v->soc.num_states; ++i) {
+    for (i = start_state; i < v->soc.num_states; ++i) {
         mode_lib->vba.NotEnoughDSCUnits[i] = false;
         mode_lib->vba.NotEnoughDSCSlices[i] = false;
         v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
@@ -2496,7 +2505,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
     }
 
     /*DSC Delay per state*/
-    for (i = 0; i < v->soc.num_states; ++i) {
+    for (i = start_state; i < v->soc.num_states; ++i) {
         for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
             mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement(
                     mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k],
@@ -2523,7 +2532,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
     //Calculate Swath, DET Configuration, DCFCLKDeepSleep
     //
-    for (i = 0; i < (int) v->soc.num_states; ++i) {
+    for (i = start_state; i < (int) v->soc.num_states; ++i) {
         for (j = 0; j <= 1; ++j) {
             for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
                 mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k];
@@ -2661,7 +2670,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
             mode_lib->vba.SurfaceSizeInMALL,
             &mode_lib->vba.ExceededMALLSize);
 
-    for (i = 0; i < v->soc.num_states; i++) {
+    for (i = start_state; i < v->soc.num_states; i++) {
         for (j = 0; j < 2; j++) {
             for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
                 mode_lib->vba.swath_width_luma_ub_this_state[k] =
@@ -2888,7 +2897,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
     }
 
     //Calculate Return BW
-    for (i = 0; i < (int) v->soc.num_states; ++i) {
+    for (i = start_state; i < (int) v->soc.num_states; ++i) {
         for (j = 0; j <= 1; ++j) {
             for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
                 if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2967,7 +2976,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
             &mode_lib->vba.MinPrefetchMode,
             &mode_lib->vba.MaxPrefetchMode);
 
-    for (i = 0; i < (int) v->soc.num_states; ++i) {
+    for (i = start_state; i < (int) v->soc.num_states; ++i) {
         for (j = 0; j <= 1; ++j)
             mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i];
     }
@@ -3089,7 +3098,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
             mode_lib->vba.DCFCLKState);
     } // UseMinimumRequiredDCFCLK == true
 
-    for (i = 0; i < (int) v->soc.num_states; ++i) {
+    for (i = start_state; i < (int) v->soc.num_states; ++i) {
         for (j = 0; j <= 1; ++j) {
             mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i,
                     mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j],
@@ -3098,7 +3107,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
     }
 
     //Re-ordering Buffer Support Check
-    for (i = 0; i < (int) v->soc.num_states; ++i) {
+    for (i = start_state; i < (int) v->soc.num_states; ++i) {
         for (j = 0; j <= 1; ++j) {
             if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024
                     / mode_lib->vba.ReturnBWPerState[i][j]
@@ -3120,7 +3129,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
                 + mode_lib->vba.ReadBandwidthChroma[k];
     }
 
-    for (i = 0; i < (int) v->soc.num_states; ++i) {
+    for (i = start_state; i < (int) v->soc.num_states; ++i) {
         for (j = 0; j <= 1; ++j) {
             mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] = dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j]
@@ -3144,7 +3153,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
 
     /* Prefetch Check */
 
-    for (i = 0; i < (int) v->soc.num_states; ++i) {
+    for (i = start_state; i < (int) v->soc.num_states; ++i) {
         for (j = 0; j <= 1; ++j) {
             mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j];
@@ -3662,7 +3671,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_l
 
     MaximumMPCCombine = 0;
 
-    for (i = v->soc.num_states; i >= 0; i--) {
+    for (i = v->soc.num_states; i >= start_state; i--) {
         if (i == v->soc.num_states || mode_lib->vba.ModeSupport[i][0] == true ||
                 mode_lib->vba.ModeSupport[i][1] == true) {
             mode_lib->vba.VoltageLevel = i;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index 0ea406145c1d..b80cef70fa60 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
     }
 
     /* Override from VBIOS for num_chan */
-    if (dc->ctx->dc_bios->vram_info.num_chans)
+    if (dc->ctx->dc_bios->vram_info.num_chans) {
         dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+        dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+            dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+    }
 
     if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
         dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3d643d50c3eb..a9d49ef58fb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -91,6 +91,7 @@ struct display_mode_lib {
     struct dal_logger *logger;
     struct dml_funcs funcs;
     struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];
+    bool validate_max_state;
 };
 
 void dml_init_instance(struct display_mode_lib *lib,
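
The dcn20_adjust_freesync_v_startup() helper introduced above only ever raises vstartup_start, and the value it raises it to works out to the full vertical blanking interval (v_total minus the addressable lines and borders). Below is a minimal standalone sketch of the same arithmetic, using a hypothetical 1080p timing (v_total = 1125, 4-line front porch, no borders) rather than values taken from the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical progressive 1080p timing, not taken from the patch */
    uint32_t v_total = 1125;
    uint32_t v_addressable = 1080;
    uint32_t v_front_porch = 4;
    uint32_t v_border_top = 0, v_border_bottom = 0;
    int vstartup_start = 26; /* example value coming out of DML */

    /* blank_start = frame end - front porch */
    uint32_t blank_start = v_total - v_front_porch; /* 1121 */

    /* blank_end = blank_start - active */
    uint32_t blank_end = blank_start - v_border_bottom -
            v_addressable - v_border_top; /* 41 */

    /* new vstartup spans the whole vertical blank */
    uint32_t new_vstartup = blank_end + (v_total - blank_start); /* 45 */

    if ((int)new_vstartup > vstartup_start)
        vstartup_start = new_vstartup;

    printf("adjusted vstartup_start = %d\n", vstartup_start);
    return 0;
}

With these numbers blank_start is 1121, blank_end is 41, and vstartup_start is raised from 26 to 45, i.e. v_total - v_addressable; a vstartup that was already larger would be left untouched.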