From f30a3bea92bdab398531129d187629fb1d28f598 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 12 Feb 2024 16:51:59 -0500 Subject: drm/amd/display: Exit idle optimizations before HDCP execution [WHY] PSP can access DCN registers during command submission and we need to ensure that DCN is not in PG before doing so. [HOW] Add a callback to DM to lock and notify DC for idle optimization exit. It can't be DC directly because of a potential race condition with the link protection thread and the rest of DM operation. Cc: Mario Limonciello Cc: Alex Deucher Reviewed-by: Charlene Liu Acked-by: Alex Hung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 10 ++++++++++ drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 8 ++++++++ 2 files changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index 5e01c6e24cbc..9a5a1726acaf 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -88,6 +88,14 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) !hdcp->connection.is_hdcp2_revoked; } +static void exit_idle_optimizations(struct mod_hdcp *hdcp) +{ + struct mod_hdcp_dm *dm = &hdcp->config.dm; + + if (dm->funcs.exit_idle_optimizations) + dm->funcs.exit_idle_optimizations(dm->handle); +} + static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, struct mod_hdcp_event_context *event_ctx, union mod_hdcp_transition_input *input) @@ -543,6 +551,8 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context)); event_ctx.event = event; + exit_idle_optimizations(hdcp); + /* execute and transition */ exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input); trans_status = transition( diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index a4d344a4db9e..cdb17b093f2b 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -156,6 +156,13 @@ struct mod_hdcp_ddc { } funcs; }; +struct mod_hdcp_dm { + void *handle; + struct { + void (*exit_idle_optimizations)(void *handle); + } funcs; +}; + struct mod_hdcp_psp { void *handle; void *funcs; @@ -272,6 +279,7 @@ struct mod_hdcp_display_query { struct mod_hdcp_config { struct mod_hdcp_psp psp; struct mod_hdcp_ddc ddc; + struct mod_hdcp_dm dm; uint8_t index; }; -- cgit v1.2.3-70-g09d2 From f2703a3596a279b0be6eeed4c500bdbaa8dc3ce4 Mon Sep 17 00:00:00 2001 From: Martin Leung Date: Mon, 26 Feb 2024 13:20:08 -0500 Subject: drm/amd/display: revert Exit idle optimizations before HDCP execution why and how: causes black screen on PNP on DCN 3.5 This reverts commit f30a3bea92bd ("drm/amd/display: Exit idle optimizations before HDCP execution") Cc: Mario Limonciello Cc: Alex Deucher Reviewed-by: Nicholas Kazlauskas Acked-by: Wayne Lin Signed-off-by: Martin Leung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 10 ---------- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 8 -------- 2 files changed, 18 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index 9a5a1726acaf..5e01c6e24cbc 100644 --- 
a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -88,14 +88,6 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp) !hdcp->connection.is_hdcp2_revoked; } -static void exit_idle_optimizations(struct mod_hdcp *hdcp) -{ - struct mod_hdcp_dm *dm = &hdcp->config.dm; - - if (dm->funcs.exit_idle_optimizations) - dm->funcs.exit_idle_optimizations(dm->handle); -} - static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, struct mod_hdcp_event_context *event_ctx, union mod_hdcp_transition_input *input) @@ -551,8 +543,6 @@ enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context)); event_ctx.event = event; - exit_idle_optimizations(hdcp); - /* execute and transition */ exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input); trans_status = transition( diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index cdb17b093f2b..a4d344a4db9e 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -156,13 +156,6 @@ struct mod_hdcp_ddc { } funcs; }; -struct mod_hdcp_dm { - void *handle; - struct { - void (*exit_idle_optimizations)(void *handle); - } funcs; -}; - struct mod_hdcp_psp { void *handle; void *funcs; @@ -279,7 +272,6 @@ struct mod_hdcp_display_query { struct mod_hdcp_config { struct mod_hdcp_psp psp; struct mod_hdcp_ddc ddc; - struct mod_hdcp_dm dm; uint8_t index; }; -- cgit v1.2.3-70-g09d2 From e2fdd5c5257dcb5afcd5557d4b009e4982d86da6 Mon Sep 17 00:00:00 2001 From: George Shen Date: Wed, 7 Feb 2024 14:40:34 -0500 Subject: drm/amd/display: Add left edge pixel for YCbCr422/420 + ODM pipe split [WHY] Currently 3-tap chroma subsampling is used for YCbCr422/420. When ODM pipesplit is used, pixels on the left edge of ODM slices need one extra pixel from the right edge of the previous slice to calculate the correct chroma value. Without this change, the chroma value is slightly different than expected. This is usually imperceptible visually, but it impacts test pattern CRCs for compliance test automation. [HOW] Update logic to use the register for adding extra left edge pixel for YCbCr422/420 ODM cases. 
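For illustration (not part of the patch; the helper below is a hypothetical standalone sketch): with a 3840-pixel-wide YCbCr420 timing split into two 1920-pixel ODM slices, the second slice's recout would move one pixel left and grow one pixel wider (x = 1919, width = 1921) so the 3-tap chroma filter can see the last pixel of the previous slice. The real logic lives in calculate_odm_slice_in_timing_active() in the diff below.

#include <stdbool.h>

struct rect { int x; int width; };

/* Widen ODM slices after the first one by a single left-edge pixel when the
 * stream uses YCbCr422/420 chroma subsampling. */
static void widen_slice_for_chroma(struct rect *odm_rec, int odm_slice_idx,
				   bool is_ycbcr_422_or_420)
{
	if (odm_slice_idx > 0 && is_ycbcr_422_or_420) {
		odm_rec->x -= 1;	/* borrow one pixel from the previous slice */
		odm_rec->width += 1;	/* e.g. x 1920 -> 1919, width 1920 -> 1921 */
	}
}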
Cc: Mario Limonciello Cc: Alex Deucher Reviewed-by: Alvin Lee Acked-by: Alex Hung Signed-off-by: George Shen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 4 +++ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 37 ++++++++++++++++++++++ .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 10 ++++++ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 ++ drivers/gpu/drm/amd/display/dc/inc/resource.h | 4 +++ 5 files changed, 57 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e7dc128f6284..7f3e7c49a486 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3167,6 +3167,10 @@ static bool update_planes_and_stream_state(struct dc *dc, if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) resource_build_test_pattern_params(&context->res_ctx, otg_master); + + if (otg_master && (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 || + otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)) + resource_build_subsampling_params(&context->res_ctx, otg_master); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ec4bf9432bdb..798e6f7fa4e3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -828,6 +828,16 @@ static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ct stream->timing.v_border_bottom + stream->timing.v_border_top; + /* Recout for ODM slices after the first slice need one extra left edge pixel + * for 3-tap chroma subsampling. + */ + if (odm_slice_idx > 0 && + (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 || + pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)) { + odm_rec.x -= 1; + odm_rec.width += 1; + } + return odm_rec; } @@ -1444,6 +1454,7 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx, enum controller_dp_test_pattern controller_test_pattern; enum controller_dp_color_space controller_color_space; enum dc_color_depth color_depth = otg_master->stream->timing.display_color_depth; + enum dc_pixel_encoding pixel_encoding = otg_master->stream->timing.pixel_encoding; int h_active = otg_master->stream->timing.h_addressable + otg_master->stream->timing.h_border_left + otg_master->stream->timing.h_border_right; @@ -1475,10 +1486,36 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx, else params->width = last_odm_slice_width; + /* Extra left edge pixel is required for 3-tap chroma subsampling. */ + if (i != 0 && (pixel_encoding == PIXEL_ENCODING_YCBCR422 || + pixel_encoding == PIXEL_ENCODING_YCBCR420)) { + params->offset -= 1; + params->width += 1; + } + offset += odm_slice_width; } } +void resource_build_subsampling_params(struct resource_context *res_ctx, + struct pipe_ctx *otg_master) +{ + struct pipe_ctx *opp_heads[MAX_PIPES]; + int odm_cnt = 1; + int i; + + odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads); + + /* For ODM slices after the first slice, extra left edge pixel is required + * for 3-tap chroma subsampling. 
+ */ + if (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 || + otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + for (i = 0; i < odm_cnt; i++) + opp_heads[i]->stream_res.left_edge_extra_pixel = (i == 0) ? false : true; + } +} + bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 8b3536c380b8..4dfc2dff5e01 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1579,6 +1579,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state, if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) new_pipe->update_flags.bits.tg_changed = 1; + if (resource_is_pipe_type(new_pipe, OPP_HEAD)) { + if (old_pipe->stream_res.left_edge_extra_pixel != new_pipe->stream_res.left_edge_extra_pixel) + new_pipe->update_flags.bits.opp_changed = 1; + } + /* * Detect mpcc blending changes, only dpp inst and opp matter here, * mpccs getting removed/inserted update connected ones during their own @@ -1962,6 +1967,11 @@ static void dcn20_program_pipe( pipe_ctx->stream_res.opp, &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping); + + if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) + pipe_ctx->stream_res.opp->funcs->opp_program_left_edge_extra_pixel( + pipe_ctx->stream_res.opp, + pipe_ctx->stream_res.left_edge_extra_pixel); } /* Set ABM pipe after other pipe configurations done */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b1b72e688f74..e034cbb40620 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -333,6 +333,8 @@ struct stream_resource { uint8_t gsl_group; struct test_pattern_params test_pattern_params; + + bool left_edge_extra_pixel; }; struct plane_resource { diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 77a60aa9f27b..b14d52e52fa2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -107,6 +107,10 @@ void resource_build_test_pattern_params( struct resource_context *res_ctx, struct pipe_ctx *pipe_ctx); +void resource_build_subsampling_params( + struct resource_context *res_ctx, + struct pipe_ctx *pipe_ctx); + bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx); enum dc_status resource_build_scaling_params_for_context( -- cgit v1.2.3-70-g09d2 From ffd8e4a3261ba2101445750a0d9fab4b35aa6662 Mon Sep 17 00:00:00 2001 From: Gabe Teeger Date: Wed, 28 Feb 2024 12:42:57 -0500 Subject: drm/amd/display: Revert Add left edge pixel + ODM pipe split This reverts commit e2fdd5c5257d ("drm/amd/display: Add left edge pixel for YCbCr422/420 + ODM pipe split") Cc: Mario Limonciello Cc: Alex Deucher Reviewed-by: George Shen Reviewed-by: Charlene Liu Reviewed-by: Jun Lei Acked-by: Wayne Lin Signed-off-by: Gabe Teeger Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 4 --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 37 ---------------------- .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 10 ------ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 -- drivers/gpu/drm/amd/display/dc/inc/resource.h | 4 --- 5 files changed, 57 deletions(-) (limited to 
'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7f3e7c49a486..e7dc128f6284 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3167,10 +3167,6 @@ static bool update_planes_and_stream_state(struct dc *dc, if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) resource_build_test_pattern_params(&context->res_ctx, otg_master); - - if (otg_master && (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 || - otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)) - resource_build_subsampling_params(&context->res_ctx, otg_master); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 798e6f7fa4e3..ec4bf9432bdb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -828,16 +828,6 @@ static struct rect calculate_odm_slice_in_timing_active(struct pipe_ctx *pipe_ct stream->timing.v_border_bottom + stream->timing.v_border_top; - /* Recout for ODM slices after the first slice need one extra left edge pixel - * for 3-tap chroma subsampling. - */ - if (odm_slice_idx > 0 && - (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 || - pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)) { - odm_rec.x -= 1; - odm_rec.width += 1; - } - return odm_rec; } @@ -1454,7 +1444,6 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx, enum controller_dp_test_pattern controller_test_pattern; enum controller_dp_color_space controller_color_space; enum dc_color_depth color_depth = otg_master->stream->timing.display_color_depth; - enum dc_pixel_encoding pixel_encoding = otg_master->stream->timing.pixel_encoding; int h_active = otg_master->stream->timing.h_addressable + otg_master->stream->timing.h_border_left + otg_master->stream->timing.h_border_right; @@ -1486,36 +1475,10 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx, else params->width = last_odm_slice_width; - /* Extra left edge pixel is required for 3-tap chroma subsampling. */ - if (i != 0 && (pixel_encoding == PIXEL_ENCODING_YCBCR422 || - pixel_encoding == PIXEL_ENCODING_YCBCR420)) { - params->offset -= 1; - params->width += 1; - } - offset += odm_slice_width; } } -void resource_build_subsampling_params(struct resource_context *res_ctx, - struct pipe_ctx *otg_master) -{ - struct pipe_ctx *opp_heads[MAX_PIPES]; - int odm_cnt = 1; - int i; - - odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads); - - /* For ODM slices after the first slice, extra left edge pixel is required - * for 3-tap chroma subsampling. - */ - if (otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422 || - otg_master->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { - for (i = 0; i < odm_cnt; i++) - opp_heads[i]->stream_res.left_edge_extra_pixel = (i == 0) ? 
false : true; - } -} - bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 4dfc2dff5e01..8b3536c380b8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1579,11 +1579,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state, if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) new_pipe->update_flags.bits.tg_changed = 1; - if (resource_is_pipe_type(new_pipe, OPP_HEAD)) { - if (old_pipe->stream_res.left_edge_extra_pixel != new_pipe->stream_res.left_edge_extra_pixel) - new_pipe->update_flags.bits.opp_changed = 1; - } - /* * Detect mpcc blending changes, only dpp inst and opp matter here, * mpccs getting removed/inserted update connected ones during their own @@ -1967,11 +1962,6 @@ static void dcn20_program_pipe( pipe_ctx->stream_res.opp, &pipe_ctx->stream->bit_depth_params, &pipe_ctx->stream->clamping); - - if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) - pipe_ctx->stream_res.opp->funcs->opp_program_left_edge_extra_pixel( - pipe_ctx->stream_res.opp, - pipe_ctx->stream_res.left_edge_extra_pixel); } /* Set ABM pipe after other pipe configurations done */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index e034cbb40620..b1b72e688f74 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -333,8 +333,6 @@ struct stream_resource { uint8_t gsl_group; struct test_pattern_params test_pattern_params; - - bool left_edge_extra_pixel; }; struct plane_resource { diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index b14d52e52fa2..77a60aa9f27b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -107,10 +107,6 @@ void resource_build_test_pattern_params( struct resource_context *res_ctx, struct pipe_ctx *pipe_ctx); -void resource_build_subsampling_params( - struct resource_context *res_ctx, - struct pipe_ctx *pipe_ctx); - bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx); enum dc_status resource_build_scaling_params_for_context( -- cgit v1.2.3-70-g09d2 From bd1c92a1623048ec6c73567ca5a229d28e18c968 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Mon, 11 Dec 2023 10:48:00 -0500 Subject: drm/amd/display: Add some forward declarations [WHAT] Add DML2 pipe and config struct forward declaration as a preparation for DML2. Reviewed-by: Chaitanya Dhere Acked-by: Alex Hung Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h index 2f91244a7b01..1538b708d8be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h @@ -30,6 +30,8 @@ #include "dml2_dc_types.h" struct dml2_context; +struct dml2_dml_to_dc_pipe_mapping; +struct dml_display_cfg_st; /* * dml2_map_dc_pipes - Creates a pipe linkage in dc_state based on current display config. 
-- cgit v1.2.3-70-g09d2 From dc406d92a097c669e6468ac4f694b4c927c47ab6 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Thu, 7 Mar 2024 21:31:50 +0530 Subject: drm/amdgpu: add recent pagefault info in vm_manager MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently page fault information is stored per VM and could be freed or become stale during reset. Also add the page fault information to the vm_manager, which is a global space for VMs and remains valid across resets. Signed-off-by: Sunil Khatri Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2 ++ 2 files changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4299ce386322..81fb3465e197 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2924,6 +2924,14 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, if (vm && status) { vm->fault_info.addr = addr; vm->fault_info.status = status; + /* + * Update the fault information globally for later usage + * when vm could be stale or freed. + */ + adev->vm_manager.fault_info.addr = addr; + adev->vm_manager.fault_info.vmhub = vmhub; + adev->vm_manager.fault_info.status = status; + if (AMDGPU_IS_GFXHUB(vmhub)) { vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX; vm->fault_info.vmhub |= diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 047ec1930d12..8efa8422f4f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -422,6 +422,8 @@ struct amdgpu_vm_manager { * look up VM of a page fault */ struct xarray pasids; + /* Global registration of recent page fault information */ + struct amdgpu_vm_fault_info fault_info; }; struct amdgpu_bo_va_mapping; -- cgit v1.2.3-70-g09d2 From caef6c453cf29b3ae619afd551cf3a0d333a3600 Mon Sep 17 00:00:00 2001 From: Aurabindo Pillai Date: Mon, 11 Dec 2023 14:51:22 -0500 Subject: drm/amd/display: Add DML2 folder to include path Add DML2 compilation rule in the Makefile. Reviewed-by: Chaitanya Dhere Acked-by: Alex Hung Signed-off-by: Aurabindo Pillai Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index acff3449b8d7..1c9498a72520 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -67,6 +67,7 @@ frame_warn_flag := -Wframe-larger-than=2048 endif endif +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml2_wrapper.o := $(dml2_ccflags) -- cgit v1.2.3-70-g09d2 From b7b23877a2479951578dcaa8523314ed2cceccc3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 19 Feb 2024 15:29:51 +0800 Subject: drm/amdgpu: add new bit definitions for GC 9.0 PROTECTION_FAULT_STATUS Add UCE and FED bit definitions.
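A minimal standalone sketch of decoding the new bits (the defines are copied from the hunk below; in the driver this is normally done through the REG_GET_FIELD() macro, as the gfxhub patches later in this series do):

#include <stdbool.h>
#include <stdint.h>

#define VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT 0x1d
#define VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e
#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK   0x20000000L
#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK   0x40000000L

/* True when the UCE (uncorrectable error) bit is set in the fault status. */
static bool fault_status_uce(uint32_t status)
{
	return (status & VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK) >>
	       VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT;
}

/* True when the FED bit is set in the fault status. */
static bool fault_status_fed(uint32_t status)
{
	return (status & VM_L2_PROTECTION_FAULT_STATUS__FED_MASK) >>
	       VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT;
}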
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index efc16ddf274a..2dfa0e5b1aa3 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -6822,6 +6822,8 @@ #define VM_L2_PROTECTION_FAULT_STATUS__VMID__SHIFT 0x14 #define VM_L2_PROTECTION_FAULT_STATUS__VF__SHIFT 0x18 #define VM_L2_PROTECTION_FAULT_STATUS__VFID__SHIFT 0x19 +#define VM_L2_PROTECTION_FAULT_STATUS__UCE__SHIFT 0x1d +#define VM_L2_PROTECTION_FAULT_STATUS__FED__SHIFT 0x1e #define VM_L2_PROTECTION_FAULT_STATUS__MORE_FAULTS_MASK 0x00000001L #define VM_L2_PROTECTION_FAULT_STATUS__WALKER_ERROR_MASK 0x0000000EL #define VM_L2_PROTECTION_FAULT_STATUS__PERMISSION_FAULTS_MASK 0x000000F0L @@ -6832,6 +6834,8 @@ #define VM_L2_PROTECTION_FAULT_STATUS__VMID_MASK 0x00F00000L #define VM_L2_PROTECTION_FAULT_STATUS__VF_MASK 0x01000000L #define VM_L2_PROTECTION_FAULT_STATUS__VFID_MASK 0x1E000000L +#define VM_L2_PROTECTION_FAULT_STATUS__UCE_MASK 0x20000000L +#define VM_L2_PROTECTION_FAULT_STATUS__FED_MASK 0x40000000L //VM_L2_PROTECTION_FAULT_ADDR_LO32 #define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32__SHIFT 0x0 #define VM_L2_PROTECTION_FAULT_ADDR_LO32__LOGICAL_PAGE_ADDR_LO32_MASK 0xFFFFFFFFL -- cgit v1.2.3-70-g09d2 From fb0f5f541475184f87965fa892570c053fd9eda3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 19 Feb 2024 15:51:29 +0800 Subject: drm/amdgpu: add utcl2 poison query for gfxhub Implement it for gfxhub 1.0 and 1.2. v2: input logical xcc id for poison query interface. 
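A rough caller-side sketch (the wrapper name is hypothetical and it assumes the amdgpu driver context; it mirrors how amdgpu_amdkfd_ras_query_utcl2_poison_status() is reworked later in this series): consumers go through the gfxhub function table and treat a missing hook as "no poison".

/* Hypothetical wrapper showing how the new callback is meant to be consumed. */
static bool query_utcl2_poison(struct amdgpu_device *adev, int xcc_id)
{
	if (adev->gfxhub.funcs && adev->gfxhub.funcs->query_utcl2_poison_status)
		return adev->gfxhub.funcs->query_utcl2_poison_status(adev, xcc_id);

	return false;
}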
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h | 2 ++ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 17 +++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c | 15 +++++++++++++++ 3 files changed, 34 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h index c7b44aeb671b..103a837ccc71 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h @@ -38,6 +38,8 @@ struct amdgpu_gfxhub_funcs { void (*mode2_save_regs)(struct amdgpu_device *adev); void (*mode2_restore_regs)(struct amdgpu_device *adev); void (*halt)(struct amdgpu_device *adev); + bool (*query_utcl2_poison_status)(struct amdgpu_device *adev, + int xcc_id); }; struct amdgpu_gfxhub { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 22175da0e16a..d200310d1731 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -443,6 +443,22 @@ static void gfxhub_v1_0_init(struct amdgpu_device *adev) mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32; } +static bool gfxhub_v1_0_query_utcl2_poison_status(struct amdgpu_device *adev, + int xcc_id) +{ + u32 status = 0; + struct amdgpu_vmhub *hub; + + if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(9, 4, 2)) + return false; + + hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; + status = RREG32(hub->vm_l2_pro_fault_status); + /* reset page fault status */ + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); + + return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); +} const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = { .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset, @@ -452,4 +468,5 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = { .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default, .init = gfxhub_v1_0_init, .get_xgmi_info = gfxhub_v1_1_get_xgmi_info, + .query_utcl2_poison_status = gfxhub_v1_0_query_utcl2_poison_status, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index 49aecdcee006..77df8c9cbad2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -620,6 +620,20 @@ static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev) return 0; } +static bool gfxhub_v1_2_query_utcl2_poison_status(struct amdgpu_device *adev, + int xcc_id) +{ + u32 fed, status; + + status = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regVM_L2_PROTECTION_FAULT_STATUS); + fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); + /* reset page fault status */ + WREG32_P(SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), + regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + + return fed; +} + const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset, .setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs, @@ -628,6 +642,7 @@ const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = { .set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default, .init = gfxhub_v1_2_init, .get_xgmi_info = gfxhub_v1_2_get_xgmi_info, + .query_utcl2_poison_status = gfxhub_v1_2_query_utcl2_poison_status, }; static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask) -- cgit v1.2.3-70-g09d2 From 583681d4a417a67afbcc3664d31fb58d0e59aeae Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Wed, 6 Mar 2024 22:59:34 +0530 Subject: drm/amdgpu: add vm fault information to devcoredump 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add page fault information to the devcoredump. Output of devcoredump: **** AMDGPU Device Coredump **** version: 1 kernel: 6.7.0-amd-staging-drm-next module: amdgpu time: 29.725011811 process_name: soft_recovery_p PID: 1720 Ring timed out details IP Type: 0 Ring Name: gfx_0.0.0 [gfxhub] Page fault observed Faulty page starting at address: 0x0000000000000000 Protection fault status register: 0x301031 VRAM is lost due to GPU reset! Signed-off-by: Sunil Khatri Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 147100c27c2d..6d059f853adc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -203,6 +203,18 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, coredump->ring->name); } + if (coredump->adev) { + struct amdgpu_vm_fault_info *fault_info = + &coredump->adev->vm_manager.fault_info; + + drm_printf(&p, "\n[%s] Page fault observed\n", + fault_info->vmhub ? "mmhub" : "gfxhub"); + drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", + fault_info->addr); + drm_printf(&p, "Protection fault status register: 0x%x\n\n", + fault_info->status); + } + if (coredump->reset_vram_lost) drm_printf(&p, "VRAM is lost due to GPU reset!\n"); if (coredump->adev->reset_info.num_regs) { -- cgit v1.2.3-70-g09d2 From 9bbe787877096e9d4e9dec37059b251840906579 Mon Sep 17 00:00:00 2001 From: Roman Li Date: Wed, 14 Feb 2024 16:26:13 -0500 Subject: drm/amd/display: Fix function banner for amdgpu_dm_psr_disable_all() [WHY] Incorrect function name in function banner. [HOW] Correct name and brief description. Reviewed-by: Hersen Wu Reviewed-by: Aurabindo Pillai Acked-by: Alex Hung Signed-off-by: Roman Li Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 1f08c6564c3b..a48a79e84e82 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -210,7 +210,7 @@ bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) } /* - * amdgpu_dm_psr_disable() - disable psr f/w + * amdgpu_dm_psr_disable_all() - disable psr f/w for all streams * if psr is enabled on any stream * * Return: true if success -- cgit v1.2.3-70-g09d2 From 3eb899c40a6190f6eee0bea7c340a4cb32112548 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Mon, 11 Mar 2024 16:58:54 +0530 Subject: drm/amdgpu: add ring buffer information in devcoredump MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add relevant ringbuffer information such as rptr, wptr,rb mask, ring name, ring size and also the rings content for each ring on a gpu reset. 
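A standalone sketch of the shape of the dump loop added below (printf() and plain pointers stand in for drm_printf() and struct amdgpu_ring; this is an illustration, not the driver code): the offset advances in bytes while values are read one dword at a time.

#include <stdint.h>
#include <stdio.h>

/* Print ring contents as "offset value" pairs, one dword per line. */
static void dump_ring(const uint32_t *ring, uint32_t ring_size_bytes)
{
	printf("Ring contents\nOffset \t Value\n");
	for (uint32_t j = 0; j < ring_size_bytes; j += 4)
		printf("0x%x \t 0x%x\n", (unsigned int)j, (unsigned int)ring[j / 4]);
}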
Signed-off-by: Sunil Khatri Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 6d059f853adc..a0dbccad2f53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -215,6 +215,27 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, fault_info->status); } + drm_printf(&p, "Ring buffer information\n"); + for (int i = 0; i < coredump->adev->num_rings; i++) { + int j = 0; + struct amdgpu_ring *ring = coredump->adev->rings[i]; + + drm_printf(&p, "ring name: %s\n", ring->name); + drm_printf(&p, "Rptr: 0x%llx Wptr: 0x%llx RB mask: %x\n", + amdgpu_ring_get_rptr(ring), + amdgpu_ring_get_wptr(ring), + ring->buf_mask); + drm_printf(&p, "Ring size in dwords: %d\n", + ring->ring_size / 4); + drm_printf(&p, "Ring contents\n"); + drm_printf(&p, "Offset \t Value\n"); + + while (j < ring->ring_size) { + drm_printf(&p, "0x%x \t 0x%x\n", j, ring->ring[j/4]); + j += 4; + } + } + if (coredump->reset_vram_lost) drm_printf(&p, "VRAM is lost due to GPU reset!\n"); if (coredump->adev->reset_info.num_regs) { -- cgit v1.2.3-70-g09d2 From 71a8d61ebc38f0f1b96011518fefd0a1e07efa74 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 19 Feb 2024 15:55:24 +0800 Subject: drm/amdgpu: retire gfx ras query_utcl2_poison_status Replace it with related interface in gfxhub functions. v2: replace node id with xcc id. get node id for query_utcl2_poison_status Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7 ++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 1 - drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 12 ------------ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 14 ++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 14 ++++++++++++-- 6 files changed, 30 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 35dd6effa9a3..d5fde8adf19b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -769,10 +769,11 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, return 0; } -bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) +bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, + int xcc_id) { - if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status) - return adev->gfx.ras->query_utcl2_poison_status(adev); + if (adev->gfxhub.funcs->query_utcl2_poison_status) + return adev->gfxhub.funcs->query_utcl2_poison_status(adev, xcc_id); else return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 0ef223c2affb..caee36e52a09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -341,7 +341,8 @@ bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev); bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem); void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); -bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev); +bool 
amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, + int xcc_id); int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id); void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 8fcf889ddce9..04a86dff71e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -259,7 +259,6 @@ struct amdgpu_cu_info { struct amdgpu_gfx_ras { struct amdgpu_ras_block_object ras_block; void (*enable_watchdog_timer)(struct amdgpu_device *adev); - bool (*query_utcl2_poison_status)(struct amdgpu_device *adev); int (*rlc_gc_fed_irq)(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 065b2bd5f5a6..3f4fd2f08163 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1909,18 +1909,7 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } -static bool gfx_v9_4_2_query_uctl2_poison_status(struct amdgpu_device *adev) -{ - u32 status = 0; - struct amdgpu_vmhub *hub; - - hub = &adev->vmhub[AMDGPU_GFXHUB(0)]; - status = RREG32(hub->vm_l2_pro_fault_status); - /* reset page fault status */ - WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); - return REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); -} struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = { .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, @@ -1934,5 +1923,4 @@ struct amdgpu_gfx_ras gfx_v9_4_2_ras = { .hw_ops = &gfx_v9_4_2_ras_ops, }, .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer, - .query_utcl2_poison_status = gfx_v9_4_2_query_uctl2_poison_status, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index 9a06c6fb6605..a8e76287dde0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -367,10 +367,20 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); + int xcc_id = 0; struct kfd_hsa_memory_exception_data exception_data; - if (client_id == SOC15_IH_CLIENTID_UTCL2 && - amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { + if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { + xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, + node_id); + if (xcc_id < 0) + xcc_id = 0; + } + + if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type && + amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) { event_interrupt_poison_consumption(dev, pasid, client_id); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 91dd5e045b51..ff7392336795 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -413,10 +413,20 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = 
SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); + uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); + int xcc_id = 0; struct kfd_hsa_memory_exception_data exception_data; - if (client_id == SOC15_IH_CLIENTID_UTCL2 && - amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { + if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { + xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, + node_id); + if (xcc_id < 0) + xcc_id = 0; + } + + if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type && + amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) { event_interrupt_poison_consumption_v9(dev, pasid, client_id); return; } -- cgit v1.2.3-70-g09d2 From 0c501d3c11bba2a8a5f98bbad557465ccbfb59a3 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 19 Feb 2024 16:03:17 +0800 Subject: drm/amdgpu: skip GFX FED error in page fault handling Let kfd interrupt handler process it. v2: return 0 instead of 1 for fed error. drop the usage of strcmp in interrupt handler. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 47b63a4ce68b..05d52b9274a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -548,7 +548,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, { bool retry_fault = !!(entry->src_data[1] & 0x80); bool write_fault = !!(entry->src_data[1] & 0x20); - uint32_t status = 0, cid = 0, rw = 0; + uint32_t status = 0, cid = 0, rw = 0, fed = 0; struct amdgpu_task_info *task_info; struct amdgpu_vmhub *hub; const char *mmhub_cid; @@ -664,6 +664,14 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, status = RREG32(hub->vm_l2_pro_fault_status); cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID); rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); + fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); + + /* for gfx fed error, kfd will handle it, return directly */ + if (fed && amdgpu_ras_is_poison_mode_supported(adev) && + (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)) && + (vmhub < AMDGPU_MMHUB0_START)) + return 0; + WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); amdgpu_vm_update_fault_cache(adev, entry->pasid, addr, status, vmhub); -- cgit v1.2.3-70-g09d2 From 2f72e02f2f54a3ccee7808db80ff8ae474a5fcba Mon Sep 17 00:00:00 2001 From: Muhammad Ahmed Date: Thu, 15 Feb 2024 15:37:09 -0500 Subject: drm/amd/display: Add debug key to allow disabling dtbclk [HOW] Add debug key to allow disabling dtbclk Reviewed-by: Charlene Liu Acked-by: Alex Hung Signed-off-by: Muhammad Ahmed Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 3 ++- drivers/gpu/drm/amd/display/dc/dc.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index c378b879c76d..c6030bed95a0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -244,7 +244,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base, } if 
(clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) { - dcn35_smu_set_dtbclk(clk_mgr, false); + if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk) + dcn35_smu_set_dtbclk(clk_mgr, false); clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en; } /* check that we're not already in lower */ diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index ee8453bf958f..c1d8288a08b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -435,6 +435,7 @@ struct dc_config { unsigned int disable_ips; unsigned int disable_ips_in_vpb; bool usb4_bw_alloc_support; + bool allow_0_dtb_clk; }; enum visual_confirm { -- cgit v1.2.3-70-g09d2 From 460ea5b3b61604171e6668ce1db2d640c4c8ae7f Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 15 Feb 2024 11:10:18 -0500 Subject: drm/amd/display: Add debug prints for IPS testing [WHY] To log commit states and when we transition in/out of allow and idle states and the caller. [HOW] Add a new logging helper and wrap idle optimization calls to receive the caller. Reviewed-by: Duncan Ma Acked-by: Alex Hung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 10 ++- drivers/gpu/drm/amd/display/dc/dc.h | 7 ++- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 73 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/include/logger_types.h | 1 + 4 files changed, 86 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index e7dc128f6284..5c8b0183959c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4874,11 +4874,15 @@ bool dc_set_replay_allow_active(struct dc *dc, bool active) return true; } -void dc_allow_idle_optimizations(struct dc *dc, bool allow) +void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name) { if (dc->debug.disable_idle_power_optimizations) return; + if (allow != dc->idle_optimizations_allowed) + DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__, + dc->idle_optimizations_allowed, allow, caller_name); + if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL)) return; @@ -4893,10 +4897,10 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow) dc->idle_optimizations_allowed = allow; } -void dc_exit_ips_for_hw_access(struct dc *dc) +void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name) { if (dc->caps.ips_support) - dc_allow_idle_optimizations(dc, false); + dc_allow_idle_optimizations_internal(dc, false, caller_name); } bool dc_dmub_is_ips_idle_state(struct dc *dc) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c1d8288a08b3..c9485a7744d7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -2339,8 +2339,11 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr); -void dc_allow_idle_optimizations(struct dc *dc, bool allow); -void dc_exit_ips_for_hw_access(struct dc *dc); +#define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__) +#define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__) + +void 
dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, const char *caller_name); +void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name); bool dc_dmub_is_ips_idle_state(struct dc *dc); /* set min and max memory clock to lowest and highest DPM level, respectively */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 6083b1dcf050..b168dfc79381 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -910,6 +910,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) return; } + DC_LOG_ERROR("%s: DMCUB error - collecting diagnostic data\n", __func__); + if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); return; @@ -1201,6 +1203,7 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) { + volatile const struct dmub_shared_state_ips_fw *ips_fw; struct dc_dmub_srv *dc_dmub_srv; union dmub_rb_cmd cmd = {0}; @@ -1211,6 +1214,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) return; dc_dmub_srv = dc->ctx->dmub_srv; + ips_fw = &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw; memset(&cmd, 0, sizeof(cmd)); cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; @@ -1226,6 +1230,12 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; union dmub_shared_state_ips_driver_signals new_signals; + DC_LOG_IPS( + "%s wait idle (ips1_commit=%d ips2_commit=%d)", + __func__, + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + dc_dmub_srv_wait_idle(dc->ctx->dmub_srv); memset(&new_signals, 0, sizeof(new_signals)); @@ -1250,6 +1260,13 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) ips_driver->signals = new_signals; } + DC_LOG_IPS( + "%s send allow_idle=%d (ips1_commit=%d ips2_commit=%d)", + __func__, + allow_idle, + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ /* We also do not perform a wait since DMCUB could enter idle after the notification. */ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? 
DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); @@ -1276,38 +1293,92 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) ips_driver->signals.all = 0; + DC_LOG_IPS( + "%s check (allow_ips1=%d allow_ips2=%d) (ips1_commit=%d ips2_commit=%d)", + __func__, + ips_driver->signals.bits.allow_ips1, + ips_driver->signals.bits.allow_ips2, + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + if (prev_driver_signals.bits.allow_ips2) { + DC_LOG_IPS( + "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + udelay(dc->debug.ips2_eval_delay_us); if (ips_fw->signals.bits.ips2_commit) { + DC_LOG_IPS( + "exit IPS2 #1 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + // Tell PMFW to exit low power state dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + DC_LOG_IPS( + "wait IPS2 entry delay (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + // Wait for IPS2 entry upper bound udelay(dc->debug.ips2_entry_delay_us); + DC_LOG_IPS( + "exit IPS2 #2 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr); + DC_LOG_IPS( + "wait IPS2 commit clear (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + while (ips_fw->signals.bits.ips2_commit) udelay(1); + DC_LOG_IPS( + "wait hw_pwr_up (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); + DC_LOG_IPS( + "resync inbox1 (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub); } } dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1) { + DC_LOG_IPS( + "wait for IPS1 commit clear (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); + while (ips_fw->signals.bits.ips1_commit) udelay(1); + DC_LOG_IPS( + "wait for IPS1 commit clear done (ips1_commit=%d ips2_commit=%d)", + ips_fw->signals.bits.ips1_commit, + ips_fw->signals.bits.ips2_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); + + DC_LOG_IPS("%s exited", __func__); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState) @@ -1335,6 +1406,8 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ if (dc_dmub_srv->idle_allowed == allow_idle) return; + DC_LOG_IPS("%s state change: old=%d new=%d", __func__, dc_dmub_srv->idle_allowed, allow_idle); + /* * Entering a low power state requires a driver notification. * Powering up the hardware requires notifying PMFW and DMCUB. diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index f39e2785e618..83479951732a 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -64,6 +64,7 @@ #define DC_LOG_DWB(...) drm_dbg((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_DP2(...) drm_dbg_dp((DC_LOGGER)->dev, __VA_ARGS__) #define DC_LOG_AUTO_DPM_TEST(...) pr_debug("[AutoDPMTest]: "__VA_ARGS__) +#define DC_LOG_IPS(...) 
pr_debug("[IPS]: "__VA_ARGS__) struct dc_log_buffer_ctx { char *buf; -- cgit v1.2.3-70-g09d2 From 2a2d6fd86328f555e2bffc3a7445418112a16013 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 17 Feb 2024 17:28:09 -0500 Subject: drm/amd/display: Add entry and exit counters [WHY & HOW] Add new counters in the shared IPS firmware state. Acked-by: Alex Hung Signed-off-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index af3fe8bb0728..4a650ac571d7 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -724,7 +724,13 @@ union dmub_shared_state_ips_driver_signals { */ struct dmub_shared_state_ips_fw { union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */ - uint32_t reserved[61]; /**< Reversed, to be updated when adding new fields. */ + uint32_t rcg_entry_count; /**< Entry counter for RCG */ + uint32_t rcg_exit_count; /**< Exit counter for RCG */ + uint32_t ips1_entry_count; /**< Entry counter for IPS1 */ + uint32_t ips1_exit_count; /**< Exit counter for IPS1 */ + uint32_t ips2_entry_count; /**< Entry counter for IPS2 */ + uint32_t ips2_exit_count; /**< Exit counter for IPS2 */ + uint32_t reserved[55]; /**< Reversed, to be updated when adding new fields. */ }; /* 248-bytes, fixed */ /** -- cgit v1.2.3-70-g09d2 From c90835b0648edb78f4630b89a2897972c3571cbd Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 18 Feb 2024 20:11:18 -0500 Subject: drm/amd/display: 3.2.274 - Change default size for dummy plane in DML2 - Enable DML2 debug flags - Add some forward declarations - Add DML2 folder to include path - Set DCN351 BB and IP the same as DCN35 - Override min required DCFCLK in dml1_validate - Fix function banner for amdgpu_dm_psr_disable_all() - Allow dirty rects to be sent to dmub when abm is active - Add debug key to allow disabling dtbclk - Add debug prints for IPS testing - Exit idle optimizations before HDCP execution - Add entry and exit counters Acked-by: Alex Hung Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c9485a7744d7..09c6a393642a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.273" +#define DC_VER "3.2.274" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From eed4edda910fe34dfae8c6bfbcf57f4593a54295 Mon Sep 17 00:00:00 2001 From: Robin Chen Date: Wed, 20 Dec 2023 20:31:28 +0800 Subject: drm/amd/display: Support long vblank feature [WHY] We want to support low hz case, but the original vtotal/vtotal_min/vtotal_max can't support more than 0x7FFF. [HOW] We use the 2 HW reg to contorl long vblank case. 1. OTG_V_COUNT_STOP_CONTROL -> vcount_stop 2. OTG_V_COUNT_STOP_CONTROL2 -> vcount_stop_timer vcount_stop define from which line we stop using vcount and start using vcount2. vcount_stop_timer define how long we use vcount2. 
Ex: Vtotal = 7 OTG_V_COUNT_STOP_CONTROL = 4 OTG_V_COUNT_STOP_CONTROL2 = 5 time : 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 vcount : 0, 1, 2, 3, - - - - - 4, 5, 6 vcount2 : 0, 1, 2, 3, 4, Reviewed-by: Jun Lei Acked-by: Alex Hung Signed-off-by: ChunTao Tso Signed-off-by: Robin Chen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 33 +++++ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 1 + .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 28 ++++ .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 3 + .../gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 3 +- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 1 + .../drm/amd/display/dc/inc/hw/timing_generator.h | 7 + .../gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 6 +- .../gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c | 165 ++++++++++++++++++++- .../gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h | 6 +- .../amd/display/dc/resource/dcn35/dcn35_resource.h | 2 + .../drm/amd/display/modules/freesync/freesync.c | 2 + 12 files changed, 253 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5c8b0183959c..64ab99628125 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -386,6 +386,30 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace) *perf_trace = NULL; } +static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust) +{ + if (!dc || !stream || !adjust) + return false; + + if (!dc->current_state) + return false; + + int i; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + + if (pipe->stream == stream && pipe->stream_res.tg) { + if (dc->hwss.set_long_vtotal) + dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max); + + return true; + } + } + + return false; +} + /** * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR * @dc: dc reference @@ -420,6 +444,15 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; stream->adjust.v_total_min = adjust->v_total_min; + stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt; + + if (dc->caps.max_v_total != 0 && + (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) { + if (adjust->allow_otg_v_count_halt) + return set_long_vtotal(dc, stream, adjust); + else + return false; + } for (i = 0; i < MAX_PIPES; i++) { struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index aae2f3a2660d..465e15f57f93 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -974,6 +974,7 @@ struct dc_crtc_timing_adjust { uint32_t v_total_max; uint32_t v_total_mid; uint32_t v_total_mid_frame_num; + uint32_t allow_otg_v_count_halt; }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 4b92df23ff0d..2e8ec58a16eb 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1394,3 +1394,31 @@ void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, 
set_static_screen_control(pipe_ctx[i]->stream_res.tg, triggers, params->num_frames); } + +void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, + int num_pipes, uint32_t v_total_min, uint32_t v_total_max) +{ + int i = 0; + struct long_vtotal_params params = {0}; + + params.vertical_total_max = v_total_max; + params.vertical_total_min = v_total_min; + + for (i = 0; i < num_pipes; i++) { + if (!pipe_ctx[i]) + continue; + + if (pipe_ctx[i]->stream) { + struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing; + + if (timing) + params.vertical_blank_start = timing->v_total - timing->v_front_porch; + else + params.vertical_blank_start = 0; + + if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs && + pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal) + pipe_ctx[i]->stream_res.tg->funcs->set_long_vtotal(pipe_ctx[i]->stream_res.tg, ¶ms); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index c354efa6c1b2..7c33bea3f4e7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -93,4 +93,7 @@ void dcn35_set_drr(struct pipe_ctx **pipe_ctx, void dcn35_set_static_screen_control(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *params); +void dcn35_set_long_vblank(struct pipe_ctx **pipe_ctx, + int num_pipes, uint32_t v_total_min, uint32_t v_total_max); + #endif /* __DC_HWSS_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index a93073055e7b..dce620d359a6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -122,7 +122,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state + .get_idle_state = dcn35_get_idle_state, + .set_long_vtotal = dcn35_set_long_vblank, }; static const struct hwseq_private_funcs dcn35_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index f89f205e42a1..26ea86316e50 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -429,6 +429,7 @@ struct hw_sequencer_funcs { bool (*is_pipe_topology_transition_seamless)(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); + void (*set_long_vtotal)(struct pipe_ctx **pipe_ctx, int num_pipes, uint32_t v_total_min, uint32_t v_total_max); }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index ffad8fe16c54..cd68ecc242c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -64,6 +64,12 @@ struct drr_params { bool immediate_flip; }; +struct long_vtotal_params { + uint32_t vertical_total_min; + uint32_t vertical_total_max; + uint32_t vertical_blank_start; +}; + #define LEFT_EYE_3D_PRIMARY_SURFACE 1 #define RIGHT_EYE_3D_PRIMARY_SURFACE 0 @@ -331,6 +337,7 @@ struct timing_generator_funcs { void (*init_odm)(struct timing_generator *tg); void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); + void 
(*set_long_vtotal)(struct timing_generator *optc, const struct long_vtotal_params *params); void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); }; diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 6c2e84d3967f..8a45d26b7531 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -129,6 +129,8 @@ struct dcn_optc_registers { uint32_t OTG_V_TOTAL_MID; uint32_t OTG_V_TOTAL_MIN; uint32_t OTG_V_TOTAL_CONTROL; + uint32_t OTG_V_COUNT_STOP_CONTROL; + uint32_t OTG_V_COUNT_STOP_CONTROL2; uint32_t OTG_TRIGA_CNTL; uint32_t OTG_TRIGA_MANUAL_TRIG; uint32_t OTG_MANUAL_FLOW_CONTROL; @@ -581,7 +583,9 @@ struct dcn_optc_registers { type OTG_CRC1_WINDOWB_X_END_READBACK;\ type OTG_CRC1_WINDOWB_Y_START_READBACK;\ type OTG_CRC1_WINDOWB_Y_END_READBACK;\ - type OPTC_FGCG_REP_DIS; + type OPTC_FGCG_REP_DIS;\ + type OTG_V_COUNT_STOP;\ + type OTG_V_COUNT_STOP_TIMER; struct dcn_optc_shift { TG_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c index 5b1547508850..d393be30dff8 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c @@ -32,6 +32,7 @@ #include "reg_helper.h" #include "dc.h" #include "dcn_calc_math.h" +#include "dc_dmub_srv.h" #define REG(reg)\ optc1->tg_regs->reg @@ -213,6 +214,167 @@ static bool optc35_configure_crc(struct timing_generator *optc, return true; } +static void optc35_setup_manual_trigger(struct timing_generator *optc) +{ + if (!optc || !optc->ctx) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + struct dc *dc = optc->ctx->dc; + + if (dc->caps.dmub_caps.mclk_sw && !dc->debug.disable_fams) + dc_dmub_srv_set_drr_manual_trigger_cmd(dc, optc->inst); + else { + /* + * MIN_MASK_EN is gone and MASK is now always enabled. + * + * To get it to it work with manual trigger we need to make sure + * we program the correct bit. 
+ */ + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ + + // Setup manual flow control for EOF via TRIG_A + if (optc->funcs && optc->funcs->setup_manual_trigger) + optc->funcs->setup_manual_trigger(optc); + } +} + +void optc35_set_drr( + struct timing_generator *optc, + const struct drr_params *params) +{ + if (!optc || !params) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t max_otg_v_total = optc1->max_v_total - 1; + + if (params != NULL && + params->vertical_total_max > 0 && + params->vertical_total_min > 0) { + + if (params->vertical_total_mid != 0) { + + REG_SET(OTG_V_TOTAL_MID, 0, + OTG_V_TOTAL_MID, params->vertical_total_mid - 1); + + REG_UPDATE_2(OTG_V_TOTAL_CONTROL, + OTG_VTOTAL_MID_REPLACING_MAX_EN, 1, + OTG_VTOTAL_MID_FRAME_NUM, + (uint8_t)params->vertical_total_mid_frame_num); + + } + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, + params->vertical_total_min - 1, params->vertical_total_max - 1); + optc35_setup_manual_trigger(optc); + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); +} + +static void optc35_set_long_vtotal( + struct timing_generator *optc, + const struct long_vtotal_params *params) +{ + if (!optc || !params) + return; + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t vcount_stop_timer = 0, vcount_stop = 0; + uint32_t max_otg_v_total = optc1->max_v_total - 1; + + if (params->vertical_total_min <= max_otg_v_total && params->vertical_total_max <= max_otg_v_total) + return; + + if (params->vertical_total_max == 0 || params->vertical_total_min == 0) { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } else if (params->vertical_total_max == params->vertical_total_min) { + vcount_stop = params->vertical_blank_start; + vcount_stop_timer = params->vertical_total_max - max_otg_v_total; + + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, 1, + OTG_V_TOTAL_MAX_SEL, 1, + OTG_FORCE_LOCK_ON_EVENT, 0, + OTG_SET_V_TOTAL_MIN_MASK, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, max_otg_v_total, max_otg_v_total); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer); + } else { + // Variable rate, keep DRR trigger mask + if (params->vertical_total_min > max_otg_v_total) { + // cannot be supported + // If MAX_OTG_V_COUNT < DRR trigger < v_total_min < v_total_max, + // DRR trigger will drop the vtotal counting directly to a new frame. + // But it should trigger between v_total_min and v_total_max. 
+ ASSERT(0); + + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, 0, 0); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, max_otg_v_total); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, 0); + } else { + // For total_min <= MAX_OTG_V_COUNT and total_max > MAX_OTG_V_COUNT + vcount_stop = params->vertical_total_min; + vcount_stop_timer = params->vertical_total_max - max_otg_v_total; + + // Example: + // params->vertical_total_min 1000 + // params->vertical_total_max 2000 + // MAX_OTG_V_COUNT_STOP = 1500 + // + // If DRR event not happened, + // time 0,1,2,3,4,...1000,1001,........,1500,1501,1502, ...1999 + // vcount 0,1,2,3,4....1000...................,1001,1002,1003,...1399 + // vcount2 0,1,2,3,4,..499, + // else (DRR event happened, ex : at line 1004) + // time 0,1,2,3,4,...1000,1001.....1004, 0 + // vcount 0,1,2,3,4....1000,.............. 0 (new frame) + // vcount2 0,1,2, 3, - + if (optc->funcs && optc->funcs->set_vtotal_min_max) + optc->funcs->set_vtotal_min_max(optc, + params->vertical_total_min - 1, max_otg_v_total); + optc35_setup_manual_trigger(optc); + + REG_WRITE(OTG_V_COUNT_STOP_CONTROL, vcount_stop); + REG_WRITE(OTG_V_COUNT_STOP_CONTROL2, vcount_stop_timer); + } + } +} + static struct timing_generator_funcs dcn35_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, @@ -245,7 +407,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = { .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable, .lock_doublebuffer_disable = optc3_lock_doublebuffer_disable, .enable_optc_clock = optc1_enable_optc_clock, - .set_drr = optc31_set_drr, + .set_drr = optc35_set_drr, .get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal, .set_vtotal_min_max = optc1_set_vtotal_min_max, .set_static_screen_control = optc1_set_static_screen_control, @@ -275,6 +437,7 @@ static struct timing_generator_funcs dcn35_tg_funcs = { .setup_manual_trigger = optc2_setup_manual_trigger, .get_hw_timing = optc1_get_hw_timing, .init_odm = optc3_init_odm, + .set_long_vtotal = optc35_set_long_vtotal, }; void dcn35_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h index 1f422e4c468f..d077e2392379 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h @@ -65,10 +65,14 @@ SF(OTG0_OTG_CRC1_WINDOWB_X_CONTROL_READBACK, OTG_CRC1_WINDOWB_X_END_READBACK, mask_sh),\ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_START_READBACK, mask_sh),\ SF(OTG0_OTG_CRC1_WINDOWB_Y_CONTROL_READBACK, OTG_CRC1_WINDOWB_Y_END_READBACK, mask_sh),\ - SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh) + SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh),\ + SF(OTG0_OTG_V_COUNT_STOP_CONTROL, OTG_V_COUNT_STOP, mask_sh),\ + SF(OTG0_OTG_V_COUNT_STOP_CONTROL2, OTG_V_COUNT_STOP_TIMER, mask_sh) void dcn35_timing_generator_init(struct optc *optc1); void dcn35_timing_generator_set_fgcg(struct optc *optc1, bool enable); +void optc35_set_drr(struct timing_generator *optc, const struct drr_params *params); + #endif /* __DC_OPTC_DCN35_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h index a51c4a9eaafe..f97bb4cb3761 100644 --- 
a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h @@ -240,6 +240,8 @@ struct resource_pool *dcn35_create_resource_pool( SRI_ARR(OTG_V_TOTAL_MAX, OTG, inst),\ SRI_ARR(OTG_V_TOTAL_MIN, OTG, inst),\ SRI_ARR(OTG_V_TOTAL_CONTROL, OTG, inst),\ + SRI_ARR(OTG_V_COUNT_STOP_CONTROL, OTG, inst),\ + SRI_ARR(OTG_V_COUNT_STOP_CONTROL2, OTG, inst),\ SRI_ARR(OTG_TRIGA_CNTL, OTG, inst),\ SRI_ARR(OTG_FORCE_COUNT_NOW_CNTL, OTG, inst),\ SRI_ARR(OTG_STATIC_SCREEN_CONTROL, OTG, inst),\ diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 3955b7e4b2e2..fbaa6effd0e3 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -1126,6 +1126,8 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->adjust.v_total_min = stream->timing.v_total; in_out_vrr->adjust.v_total_max = stream->timing.v_total; } + + in_out_vrr->adjust.allow_otg_v_count_halt = (in_config->state == VRR_STATE_ACTIVE_FIXED) ? true : false; } void mod_freesync_handle_preflip(struct mod_freesync *mod_freesync, -- cgit v1.2.3-70-g09d2 From 63ae548f1054a0b71678d0349c7dc9628ddd42ca Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Mon, 26 Feb 2024 18:38:08 +0530 Subject: drm/amd/display: Fix potential index out of bounds in color transformation function Fixes index out of bounds issue in the color transformation function. The issue could occur when the index 'i' exceeds the number of transfer function points (TRANSFER_FUNC_POINTS). The fix adds a check to ensure 'i' is within bounds before accessing the transfer function points. If 'i' is out of bounds, an error message is logged and the function returns false to indicate an error. 
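The fix follows the usual clamp-or-fail pattern for fixed-size LUT walks. As a minimal standalone sketch of the same idea (hypothetical names such as sample_lut() and LUT_POINTS; the actual DC change is in the diff below):

#include <stdbool.h>
#include <stdio.h>

#define LUT_POINTS 1025 /* stand-in for TRANSFER_FUNC_POINTS */

/* Copy every stride-th entry; fail loudly instead of reading past the table. */
static bool sample_lut(const int *lut, int stride, int *out, int out_len)
{
        int i, j;

        for (i = 0, j = 0; j < out_len; j++, i += stride) {
                if (i >= LUT_POINTS) {
                        fprintf(stderr, "index out of bounds: i=%d, LUT_POINTS=%d\n",
                                i, LUT_POINTS);
                        return false; /* caller treats this as a failed translation */
                }
                out[j] = lut[i];
        }
        return true;
}

int main(void)
{
        static int lut[LUT_POINTS];
        int out[32];

        /* stride large enough that the walk would run off the end without the check */
        return sample_lut(lut, 64, out, 32) ? 0 : 1;
}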
Reported by smatch: drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_cm_common.c:405 cm_helper_translate_curve_to_hw_format() error: buffer overflow 'output_tf->tf_pts.red' 1025 <= s32max drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_cm_common.c:406 cm_helper_translate_curve_to_hw_format() error: buffer overflow 'output_tf->tf_pts.green' 1025 <= s32max drivers/gpu/drm/amd/amdgpu/../display/dc/dcn10/dcn10_cm_common.c:407 cm_helper_translate_curve_to_hw_format() error: buffer overflow 'output_tf->tf_pts.blue' 1025 <= s32max Fixes: b629596072e5 ("drm/amd/display: Build unity lut for shaper") Cc: Vitaly Prosyak Cc: Charlene Liu Cc: Harry Wentland Cc: Rodrigo Siqueira Cc: Roman Li Cc: Aurabindo Pillai Cc: Tom Chung Signed-off-by: Srinivasan Shanmugam Reviewed-by: Tom Chung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index b7e57aa27361..b0d192c6e63e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -402,6 +402,11 @@ bool cm_helper_translate_curve_to_hw_format(struct dc_context *ctx, i += increment) { if (j == hw_points - 1) break; + if (i >= TRANSFER_FUNC_POINTS) { + DC_LOG_ERROR("Index out of bounds: i=%d, TRANSFER_FUNC_POINTS=%d\n", + i, TRANSFER_FUNC_POINTS); + return false; + } rgb_resulted[j].red = output_tf->tf_pts.red[i]; rgb_resulted[j].green = output_tf->tf_pts.green[i]; rgb_resulted[j].blue = output_tf->tf_pts.blue[i]; -- cgit v1.2.3-70-g09d2 From 97d5aa60306d286326026a40e1e3b992b5b31538 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Tue, 12 Mar 2024 16:23:46 +0100 Subject: drm/amdgpu: cleanup unused variable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch removes an unused input variable in the MES doorbell function. 
Cc: Christian König Cc: Alex Deucher Reviewed-by: Christian König Signed-off-by: Shashank Sharma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index a98e03e0a51f..eb75c524b4a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -40,7 +40,6 @@ int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev) } static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev, - struct amdgpu_mes_process *process, int ip_type, uint64_t *doorbell_index) { unsigned int offset, found; @@ -65,7 +64,6 @@ static int amdgpu_mes_kernel_doorbell_get(struct amdgpu_device *adev, } static void amdgpu_mes_kernel_doorbell_free(struct amdgpu_device *adev, - struct amdgpu_mes_process *process, uint32_t doorbell_index) { unsigned int old, rel_index; @@ -653,7 +651,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, *queue_id = queue->queue_id = r; /* allocate a doorbell index for the queue */ - r = amdgpu_mes_kernel_doorbell_get(adev, gang->process, + r = amdgpu_mes_kernel_doorbell_get(adev, qprops->queue_type, &qprops->doorbell_off); if (r) @@ -711,8 +709,7 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id, return 0; clean_up_doorbell: - amdgpu_mes_kernel_doorbell_free(adev, gang->process, - qprops->doorbell_off); + amdgpu_mes_kernel_doorbell_free(adev, qprops->doorbell_off); clean_up_queue_id: spin_lock_irqsave(&adev->mes.queue_id_lock, flags); idr_remove(&adev->mes.queue_id_idr, queue->queue_id); @@ -766,8 +763,7 @@ int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id) queue_id); list_del(&queue->list); - amdgpu_mes_kernel_doorbell_free(adev, gang->process, - queue->doorbell_off); + amdgpu_mes_kernel_doorbell_free(adev, queue->doorbell_off); amdgpu_mes_unlock(&adev->mes); amdgpu_mes_queue_free_mqd(queue); -- cgit v1.2.3-70-g09d2 From f57269f85b284f3a8154e208d7b1eb2abe652eb2 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 16 Feb 2024 14:42:05 -0500 Subject: drm/amd/display: Add guards for idle on reg read/write [WHY] If DCN is in idle then we should not be accessing DCN registers or it can lead to hangs. [HOW] Log the error and return 0 or drop the write if it's in idle. This is skipped in the exit sequence itself since the boolean flips before it starts. It also does not cover accesses from external clients outside of DM/DC like firmware or the kernel mode driver. Reviewed-by: Duncan Ma Acked-by: Alex Hung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 9 +++++++++ drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 1 + 2 files changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index b168dfc79381..938e8cb2fec1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1270,6 +1270,10 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) /* NOTE: This does not use the "wake" interface since this is part of the wake path. */ /* We also do not perform a wait since DMCUB could enter idle after the notification. */ dm_execute_dmub_cmd(dc->ctx, &cmd, allow_idle ? 
DM_DMUB_WAIT_TYPE_NO_WAIT : DM_DMUB_WAIT_TYPE_WAIT); + + /* Register access should stop at this point. */ + if (allow_idle) + dc_dmub_srv->needs_idle_wake = true; } static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) @@ -1301,6 +1305,11 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); + /* Note: register access has technically not resumed for DCN here, but we + * need to be message PMFW through our standard register interface. + */ + dc_dmub_srv->needs_idle_wake = false; + if (prev_driver_signals.bits.allow_ips2) { DC_LOG_IPS( "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)", diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 952bfb368886..60c93e9e3533 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -52,6 +52,7 @@ struct dc_dmub_srv { void *dm; bool idle_allowed; + bool needs_idle_wake; }; void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); -- cgit v1.2.3-70-g09d2 From 27f03bc680ef7e0f18b4cacba96f29674d93d817 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 21 Feb 2024 17:14:08 -0500 Subject: drm/amd/display: Guard cursor idle reallow by DC debug option [WHY] To control whether idle optimizations reallowed after the first cursor update. [HOW] Add checks to the conditions. Reviewed-by: Duncan Ma Acked-by: Alex Hung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 51a970fcb5d0..212e057ed9b0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -319,7 +319,7 @@ bool dc_stream_set_cursor_attributes( program_cursor_attributes(dc, stream, attributes); /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations) + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) dc_allow_idle_optimizations(dc, true); return true; @@ -394,7 +394,7 @@ bool dc_stream_set_cursor_position( program_cursor_position(dc, stream, position); /* re-enable idle optimizations if necessary */ - if (reset_idle_optimizations) + if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle) dc_allow_idle_optimizations(dc, true); return true; -- cgit v1.2.3-70-g09d2 From e4251d110154b6c4090d0188404345ded4ecefbe Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 21 Feb 2024 13:05:09 -0500 Subject: drm/amd/display: Add debug counters to IPS exit prints [WHY] To have a log of the entry/exit counters in case the system hangs to measure stability. [HOW] Read them from firmware state and pass them to the prints. 
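The underlying pattern is simply: snapshot the firmware-owned counters once at the start of the exit path, then emit them with the final exit print so a hang between the two points can be correlated with the last observed counts (the real change to dc_dmub_srv.c is in the diff below). A minimal sketch under hypothetical names (struct ips_fw_state, exit_low_power_state()):

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical stand-in for the firmware-owned shared state. */
struct ips_fw_state {
        uint32_t rcg_exit_count;
        uint32_t ips1_exit_count;
        uint32_t ips2_exit_count;
};

static void exit_low_power_state(const volatile struct ips_fw_state *fw)
{
        /* Snapshot up front: firmware may keep incrementing these while the
         * exit handshake below is still in progress. */
        uint32_t rcg = fw->rcg_exit_count;
        uint32_t ips1 = fw->ips1_exit_count;
        uint32_t ips2 = fw->ips2_exit_count;

        /* ... wait for the IPS1/IPS2 exit handshake here ... */

        printf("exit (count rcg=%" PRIu32 " ips1=%" PRIu32 " ips2=%" PRIu32 ")\n",
               rcg, ips1, ips2);
}

int main(void)
{
        struct ips_fw_state fw = { 3, 2, 1 };

        exit_low_power_state(&fw);
        return 0;
}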
Reviewed-by: Duncan Ma Acked-by: Alex Hung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 938e8cb2fec1..765a459d54eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1279,6 +1279,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; + uint32_t rcg_exit_count, ips1_exit_count, ips2_exit_count; if (dc->debug.dmcub_emulation) return; @@ -1295,15 +1296,22 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) &dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver; union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals; + rcg_exit_count = ips_fw->rcg_exit_count; + ips1_exit_count = ips_fw->ips1_exit_count; + ips2_exit_count = ips_fw->ips2_exit_count; + ips_driver->signals.all = 0; DC_LOG_IPS( - "%s check (allow_ips1=%d allow_ips2=%d) (ips1_commit=%d ips2_commit=%d)", + "%s (allow ips1=%d ips2=%d) (commit ips1=%d ips2=%d) (count rcg=%d ips1=%d ips2=%d)", __func__, ips_driver->signals.bits.allow_ips1, ips_driver->signals.bits.allow_ips2, ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->rcg_entry_count, + ips_fw->ips1_entry_count, + ips_fw->ips2_entry_count); /* Note: register access has technically not resumed for DCN here, but we * need to be message PMFW through our standard register interface. @@ -1387,7 +1395,11 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - DC_LOG_IPS("%s exited", __func__); + DC_LOG_IPS("%s exit (count rcg=%d ips1=%d ips2=%d)", + __func__, + rcg_exit_count, + ips1_exit_count, + ips2_exit_count); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState) -- cgit v1.2.3-70-g09d2 From 88867807564e28409d8220419da7559364109a2a Mon Sep 17 00:00:00 2001 From: Revalla Hari Krishna Date: Thu, 15 Feb 2024 18:34:56 +0530 Subject: drm/amd/display: Refactor DPP into a component directory [WHY & HOW] Move all dpp files to a new dpp directory. 
Reviewed-by: Martin Leung Acked-by: Alex Hung Signed-off-by: Revalla Hari Krishna Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Makefile | 1 + drivers/gpu/drm/amd/display/dc/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 4 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 585 -------- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 1527 -------------------- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 884 ----------- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 696 --------- drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 435 ------ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h | 781 ---------- .../gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c | 1202 --------------- drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c | 313 ---- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h | 83 -- drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 2 - .../gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 1527 -------------------- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h | 642 -------- .../gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 461 ------ drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c | 165 --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h | 38 - drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c | 53 - drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h | 57 - drivers/gpu/drm/amd/display/dc/dpp/Makefile | 77 + .../drm/amd/display/dc/dpp/dcn10/CMakeLists.txt | 6 + .../gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c | 585 ++++++++ .../gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h | 1527 ++++++++++++++++++++ .../drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c | 884 +++++++++++ .../drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c | 696 +++++++++ .../drm/amd/display/dc/dpp/dcn20/CMakeLists.txt | 5 + .../gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c | 435 ++++++ .../gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h | 781 ++++++++++ .../drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c | 1202 +++++++++++++++ .../drm/amd/display/dc/dpp/dcn201/CMakeLists.txt | 4 + .../gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c | 313 ++++ .../gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h | 83 ++ .../drm/amd/display/dc/dpp/dcn30/CMakeLists.txt | 5 + .../gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 1527 ++++++++++++++++++++ .../gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h | 642 ++++++++ .../drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c | 461 ++++++ .../drm/amd/display/dc/dpp/dcn32/CMakeLists.txt | 4 + .../gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c | 165 +++ .../gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h | 38 + .../drm/amd/display/dc/dpp/dcn35/CMakeLists.txt | 4 + .../gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 53 + .../gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h | 57 + 49 files changed, 9564 insertions(+), 9460 deletions(-) delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c delete mode 100644 
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c delete mode 100644 drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/Makefile create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c create mode 100644 drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 92a5c5efcf92..9a5bcafbf730 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -33,6 +33,7 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/hwss subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/resource subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dsc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/optc +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dpp subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/inc subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 7991ae468f75..4e9fb1742877 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -22,7 +22,7 @@ # # Makefile for Display Core (dc) component. 
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc +DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc dpp ifdef CONFIG_DRM_AMD_DC_FP diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index ae6a131be71b..8dc7938c36d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -24,9 +24,9 @@ DCN10 = dcn10_ipp.o \ dcn10_hw_sequencer_debug.o \ - dcn10_dpp.o dcn10_opp.o \ + dcn10_opp.o \ dcn10_hubp.o dcn10_mpc.o \ - dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ + dcn10_cm_common.o \ dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index b0d192c6e63e..0b49362f71b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -24,7 +24,7 @@ */ #include "dc.h" #include "reg_helper.h" -#include "dcn10_dpp.h" +#include "dcn10/dcn10_dpp.h" #include "dcn10_cm_common.h" #include "custom_float.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c deleted file mode 100644 index 4e391fd1d71c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ /dev/null @@ -1,585 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn10_dpp.h" -#include "basics/conversion.h" - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -enum pixel_format_description { - PIXEL_FORMAT_FIXED = 0, - PIXEL_FORMAT_FIXED16, - PIXEL_FORMAT_FLOAT - -}; - -enum dcn10_coef_filter_type_sel { - SCL_COEF_LUMA_VERT_FILTER = 0, - SCL_COEF_LUMA_HORZ_FILTER = 1, - SCL_COEF_CHROMA_VERT_FILTER = 2, - SCL_COEF_CHROMA_HORZ_FILTER = 3, - SCL_COEF_ALPHA_VERT_FILTER = 4, - SCL_COEF_ALPHA_HORZ_FILTER = 5 -}; - -enum dscl_autocal_mode { - AUTOCAL_MODE_OFF = 0, - - /* Autocal calculate the scaling ratio and initial phase and the - * DSCL_MODE_SEL must be set to 1 - */ - AUTOCAL_MODE_AUTOSCALE = 1, - /* Autocal perform auto centering without replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOCENTER = 2, - /* Autocal perform auto centering and auto replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOREPLICATE = 3 -}; - -enum dscl_mode_sel { - DSCL_MODE_SCALING_444_BYPASS = 0, - DSCL_MODE_SCALING_444_RGB_ENABLE = 1, - DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, - DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, - DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, - DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, - DSCL_MODE_DSCL_BYPASS = 6 -}; - -void dpp_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); - REG_GET(CM_IGAM_CONTROL, - CM_IGAM_LUT_MODE, &s->igam_lut_mode); - REG_GET(CM_IGAM_CONTROL, - CM_IGAM_INPUT_FORMAT, &s->igam_input_format); - REG_GET(CM_DGAM_CONTROL, - CM_DGAM_LUT_MODE, &s->dgam_lut_mode); - REG_GET(CM_RGAM_CONTROL, - CM_RGAM_LUT_MODE, &s->rgam_lut_mode); - REG_GET(CM_GAMUT_REMAP_CONTROL, - CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); - - if (s->gamut_remap_mode) { - s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); - s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); - s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); - s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); - s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); - s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); - } -} - -#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) - -bool dpp1_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ - if (scl_data->format == PIXEL_FORMAT_FP16 && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->ratios.horz.value != dc_fixpt_one.value && - scl_data->ratios.vert.value != dc_fixpt_one.value) - return false; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - /* TODO: add lb check */ - - /* No support for programming ratio of 4, drop to 3.99999.. 
*/ - if (scl_data->ratios.horz.value == (4ll << 32)) - scl_data->ratios.horz.value--; - if (scl_data->ratios.vert.value == (4ll << 32)) - scl_data->ratios.vert.value--; - if (scl_data->ratios.horz_c.value == (4ll << 32)) - scl_data->ratios.horz_c.value--; - if (scl_data->ratios.vert_c.value == (4ll << 32)) - scl_data->ratios.vert_c.value--; - - /* Set default taps if none are provided */ - if (in_taps->h_taps == 0) - scl_data->taps.h_taps = 4; - else - scl_data->taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) - scl_data->taps.v_taps = 4; - else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) - scl_data->taps.v_taps_c = 2; - else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) - scl_data->taps.h_taps_c = 2; - /* Only 1 and even h_taps_c are supported by hw */ - else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - -void dpp_reset(struct dpp *dpp_base) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - dpp->filter_h_c = NULL; - dpp->filter_v_c = NULL; - dpp->filter_h = NULL; - dpp->filter_v = NULL; - - memset(&dpp->scl_data, 0, sizeof(dpp->scl_data)); - memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data)); -} - - - -static void dpp1_cm_set_regamma_pwl( - struct dpp *dpp_base, const struct pwl_params *params, enum opp_regamma mode) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - uint32_t re_mode = 0; - - switch (mode) { - case OPP_REGAMMA_BYPASS: - re_mode = 0; - break; - case OPP_REGAMMA_SRGB: - re_mode = 1; - break; - case OPP_REGAMMA_XVYCC: - re_mode = 2; - break; - case OPP_REGAMMA_USER: - re_mode = dpp->is_write_to_ram_a_safe ? 4 : 3; - if (memcmp(&dpp->pwl_data, params, sizeof(*params)) == 0) - break; - - dpp1_cm_power_on_regamma_lut(dpp_base, true); - dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe); - - if (dpp->is_write_to_ram_a_safe) - dpp1_cm_program_regamma_luta_settings(dpp_base, params); - else - dpp1_cm_program_regamma_lutb_settings(dpp_base, params); - - dpp1_cm_program_regamma_lut(dpp_base, params->rgb_resulted, - params->hw_points_num); - dpp->pwl_data = *params; - - re_mode = dpp->is_write_to_ram_a_safe ? 
3 : 4; - dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe; - break; - default: - break; - } - REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode); -} - -static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\ - enum pixel_format_description *fmt) -{ - - if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F || - input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) - *fmt = PIXEL_FORMAT_FLOAT; - else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 || - input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616) - *fmt = PIXEL_FORMAT_FIXED16; - else - *fmt = PIXEL_FORMAT_FIXED; -} - -static void dpp1_set_degamma_format_float( - struct dpp *dpp_base, - bool is_float) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (is_float) { - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3); - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1); - } else { - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2); - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0); - } -} - -void dpp1_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - uint32_t pixel_format; - uint32_t alpha_en; - enum pixel_format_description fmt ; - enum dc_color_space color_space; - enum dcn10_input_csc_select select; - bool is_float; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - bool force_disable_cursor = false; - struct out_csc_color_matrix tbl_entry; - int i = 0; - - dpp1_setup_format_flags(format, &fmt); - alpha_en = 1; - pixel_format = 0; - color_space = COLOR_SPACE_SRGB; - select = INPUT_CSC_SELECT_BYPASS; - is_float = false; - - switch (fmt) { - case PIXEL_FORMAT_FIXED: - case PIXEL_FORMAT_FIXED16: - /*when output is float then FORMAT_CONTROL__OUTPUT_FP=1*/ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode, - OUTPUT_FP, 0); - break; - case PIXEL_FORMAT_FLOAT: - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode, - OUTPUT_FP, 1); - is_float = true; - break; - default: - - break; - } - - dpp1_set_degamma_format_float(dpp_base, is_float); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - pixel_format = 10; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - pixel_format = 26; /* ARGB16161616_UNORM */ - break; - case 
SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - default: - break; - } - - /* Set default color space based on format if none is given. */ - color_space = input_color_space ? input_color_space : color_space; - - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - // if input adjustments exist, program icsc with those values - - if (input_csc_color_matrix.enable_adjustment - == true) { - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - - tbl_entry.color_space = color_space; - - if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_BYPASS; - - dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry); - } else - dpp1_program_input_csc(dpp_base, color_space, select, NULL); - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - } -} - -void dpp1_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes) -{ - enum dc_cursor_color_format color_format = cursor_attributes->color_format; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE_2(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0); - - if (color_format == CURSOR_MODE_MONO) { - /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); - } -} - - -void dpp1_set_cursor_position( - struct dpp *dpp_base, - const struct dc_cursor_position *pos, - const struct dc_cursor_mi_param *param, - uint32_t width, - uint32_t height) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - int x_pos = pos->x - param->viewport.x; - int y_pos = pos->y - param->viewport.y; - int x_hotspot = pos->x_hotspot; - int y_hotspot = pos->y_hotspot; - int src_x_offset = x_pos - pos->x_hotspot; - int src_y_offset = y_pos - pos->y_hotspot; - int cursor_height = (int)height; - int cursor_width = (int)width; - uint32_t cur_en = pos->enable ? 
1 : 0; - - // Transform cursor width / height and hotspots for offset calculations - if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { - swap(cursor_height, cursor_width); - swap(x_hotspot, y_hotspot); - - if (param->rotation == ROTATION_ANGLE_90) { - // hotspot = (-y, x) - src_x_offset = x_pos - (cursor_width - x_hotspot); - src_y_offset = y_pos - y_hotspot; - } else if (param->rotation == ROTATION_ANGLE_270) { - // hotspot = (y, -x) - src_x_offset = x_pos - x_hotspot; - src_y_offset = y_pos - (cursor_height - y_hotspot); - } - } else if (param->rotation == ROTATION_ANGLE_180) { - // hotspot = (-x, -y) - if (!param->mirror) - src_x_offset = x_pos - (cursor_width - x_hotspot); - - src_y_offset = y_pos - (cursor_height - y_hotspot); - } - - if (src_x_offset >= (int)param->viewport.width) - cur_en = 0; /* not visible beyond right edge*/ - - if (src_x_offset + cursor_width <= 0) - cur_en = 0; /* not visible beyond left edge*/ - - if (src_y_offset >= (int)param->viewport.height) - cur_en = 0; /* not visible beyond bottom edge*/ - - if (src_y_offset + cursor_height <= 0) - cur_en = 0; /* not visible beyond top edge*/ - - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, cur_en); - - dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; -} - -void dpp1_cnv_set_optional_cursor_attributes( - struct dpp *dpp_base, - struct dpp_cursor_attributes *attr) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (attr) { - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); - REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); - } -} - -void dpp1_dppclk_control( - struct dpp *dpp_base, - bool dppclk_div, - bool enable) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (enable) { - if (dpp->tf_mask->DPPCLK_RATE_CONTROL) - REG_UPDATE_2(DPP_CONTROL, - DPPCLK_RATE_CONTROL, dppclk_div, - DPP_CLOCK_ENABLE, 1); - else - REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1); - } else - REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0); -} - -static const struct dpp_funcs dcn10_dpp_funcs = { - .dpp_read_state = dpp_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, - .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, - .dpp_set_csc_default = dpp1_cm_set_output_csc_default, - .dpp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut, - .dpp_program_regamma_lut = dpp1_cm_program_regamma_lut, - .dpp_configure_regamma_lut = dpp1_cm_configure_regamma_lut, - .dpp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings, - .dpp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings, - .dpp_program_regamma_pwl = dpp1_cm_set_regamma_pwl, - .dpp_program_bias_and_scale = dpp1_program_bias_and_scale, - .dpp_set_degamma = dpp1_set_degamma, - .dpp_program_input_lut = dpp1_program_input_lut, - .dpp_program_degamma_pwl = dpp1_set_degamma_pwl, - .dpp_setup = dpp1_cnv_setup, - .dpp_full_bypass = dpp1_full_bypass, - .set_cursor_attributes = dpp1_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, - .dpp_program_blnd_lut = NULL, - .dpp_program_shaper_lut = NULL, - .dpp_program_3dlut = NULL, - .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap, -}; - -static struct dpp_caps dcn10_dpp_cap = { - 
.dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT, - .dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions, -}; - -/*****************************************/ -/* Constructor, Destructor */ -/*****************************************/ - -void dpp1_construct( - struct dcn10_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn_dpp_registers *tf_regs, - const struct dcn_dpp_shift *tf_shift, - const struct dcn_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn10_dpp_funcs; - dpp->base.caps = &dcn10_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - dpp->lb_pixel_depth_supported = - LB_PIXEL_DEPTH_18BPP | - LB_PIXEL_DEPTH_24BPP | - LB_PIXEL_DEPTH_30BPP | - LB_PIXEL_DEPTH_36BPP; - - dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; - dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h deleted file mode 100644 index a039eedc7c24..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ /dev/null @@ -1,1527 +0,0 @@ -/* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DAL_DPP_DCN10_H__ -#define __DAL_DPP_DCN10_H__ - -#include "dpp.h" - -#define TO_DCN10_DPP(dpp)\ - container_of(dpp, struct dcn10_dpp, base) - -/* TODO: Use correct number of taps. 
Using polaris values for now */ -#define LB_TOTAL_NUMBER_OF_ENTRIES 5124 -#define LB_BITS_PER_ENTRY 144 - -#define TF_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## __ ## field_name ## post_fix - -//Used to resolve corner case -#define TF2_SF(reg_name, field_name, post_fix)\ - .field_name = reg_name ## _ ## field_name ## post_fix - -#define TF_REG_LIST_DCN(id) \ - SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ - SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ - SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ - SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ - SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ - SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \ - SRI(OTG_H_BLANK, DSCL, id), \ - SRI(OTG_V_BLANK, DSCL, id), \ - SRI(SCL_MODE, DSCL, id), \ - SRI(LB_DATA_FORMAT, DSCL, id), \ - SRI(LB_MEMORY_CTRL, DSCL, id), \ - SRI(DSCL_AUTOCAL, DSCL, id), \ - SRI(DSCL_CONTROL, DSCL, id), \ - SRI(SCL_BLACK_OFFSET, DSCL, id), \ - SRI(SCL_TAP_CONTROL, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ - SRI(DSCL_2TAP_CONTROL, DSCL, id), \ - SRI(MPC_SIZE, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_BOT, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \ - SRI(RECOUT_START, DSCL, id), \ - SRI(RECOUT_SIZE, DSCL, id), \ - SRI(CM_ICSC_CONTROL, CM, id), \ - SRI(CM_ICSC_C11_C12, CM, id), \ - SRI(CM_ICSC_C33_C34, CM, id), \ - SRI(CM_DGAM_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMB_SLOPE_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL1_B, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL2_B, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL1_G, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL2_G, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL1_R, CM, id), \ - SRI(CM_DGAM_RAMB_END_CNTL2_R, CM, id), \ - SRI(CM_DGAM_RAMB_REGION_0_1, CM, id), \ - SRI(CM_DGAM_RAMB_REGION_14_15, CM, id), \ - SRI(CM_DGAM_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_DGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_DGAM_RAMA_SLOPE_CNTL_R, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL1_B, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL2_B, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL1_G, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL2_G, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL1_R, CM, id), \ - SRI(CM_DGAM_RAMA_END_CNTL2_R, CM, id), \ - SRI(CM_DGAM_RAMA_REGION_0_1, CM, id), \ - SRI(CM_DGAM_RAMA_REGION_14_15, CM, id), \ - SRI(CM_MEM_PWR_CTRL, CM, id), \ - SRI(CM_DGAM_LUT_WRITE_EN_MASK, CM, id), \ - SRI(CM_DGAM_LUT_INDEX, CM, id), \ - SRI(CM_DGAM_LUT_DATA, CM, id), \ - SRI(CM_CONTROL, CM, id), \ - SRI(CM_DGAM_CONTROL, CM, id), \ - SRI(CM_TEST_DEBUG_INDEX, CM, id), \ - SRI(CM_TEST_DEBUG_DATA, CM, id), \ - SRI(FORMAT_CONTROL, CNVC_CFG, id), \ - SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ - SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ - 
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ - SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ - SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ - SRI(DPP_CONTROL, DPP_TOP, id), \ - SRI(CM_HDR_MULT_COEF, CM, id) - - - -#define TF_REG_LIST_DCN10(id) \ - TF_REG_LIST_DCN(id), \ - SRI(CM_COMA_C11_C12, CM, id),\ - SRI(CM_COMA_C33_C34, CM, id),\ - SRI(CM_COMB_C11_C12, CM, id),\ - SRI(CM_COMB_C33_C34, CM, id),\ - SRI(CM_OCSC_CONTROL, CM, id), \ - SRI(CM_OCSC_C11_C12, CM, id), \ - SRI(CM_OCSC_C33_C34, CM, id), \ - SRI(CM_BNS_VALUES_R, CM, id), \ - SRI(CM_BNS_VALUES_G, CM, id), \ - SRI(CM_BNS_VALUES_B, CM, id), \ - SRI(CM_MEM_PWR_CTRL, CM, id), \ - SRI(CM_RGAM_LUT_DATA, CM, id), \ - SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\ - SRI(CM_RGAM_LUT_INDEX, CM, id), \ - SRI(CM_RGAM_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMB_SLOPE_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL1_B, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL2_B, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL1_G, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL2_G, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL1_R, CM, id), \ - SRI(CM_RGAM_RAMB_END_CNTL2_R, CM, id), \ - SRI(CM_RGAM_RAMB_REGION_0_1, CM, id), \ - SRI(CM_RGAM_RAMB_REGION_32_33, CM, id), \ - SRI(CM_RGAM_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_RGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_RGAM_RAMA_SLOPE_CNTL_R, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL1_B, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL2_B, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL1_G, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL2_G, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL1_R, CM, id), \ - SRI(CM_RGAM_RAMA_END_CNTL2_R, CM, id), \ - SRI(CM_RGAM_RAMA_REGION_0_1, CM, id), \ - SRI(CM_RGAM_RAMA_REGION_32_33, CM, id), \ - SRI(CM_RGAM_CONTROL, CM, id), \ - SRI(CM_IGAM_CONTROL, CM, id), \ - SRI(CM_IGAM_LUT_RW_CONTROL, CM, id), \ - SRI(CM_IGAM_LUT_RW_INDEX, CM, id), \ - SRI(CM_IGAM_LUT_SEQ_COLOR, CM, id), \ - SRI(CURSOR_CONTROL, CURSOR, id), \ - SRI(CM_CMOUT_CONTROL, CM, id) - - -#define TF_REG_LIST_SH_MASK_DCN(mask_sh)\ - TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ - 
TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ - TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ - TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ - TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\ - TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_FRAC_BOT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_INT_BOT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_FRAC_BOT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ - 
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh), \ - TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh), \ - TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \ - TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \ - TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \ - TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C33, mask_sh), \ - TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C34, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_B, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_G, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_R, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_B, CM_DGAM_RAMB_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_G, CM_DGAM_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_R, CM_DGAM_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_B, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_G, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_R, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_B, CM_DGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - 
TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_G, CM_DGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_R, CM_DGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \ - TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ - TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ - TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh) - -#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_REDUCE_MODE, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, DYNAMIC_PIXEL_DEPTH, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, DITHER_EN, mask_sh),\ - TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C11, mask_sh),\ - TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C12, mask_sh),\ - TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C33, mask_sh),\ - TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C34, mask_sh),\ - TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C11, mask_sh),\ - TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C12, mask_sh),\ - TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C33, mask_sh),\ - TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C34, mask_sh),\ - TF_SF(CM0_CM_OCSC_CONTROL, CM_OCSC_MODE, mask_sh), \ - TF_SF(CM0_CM_OCSC_C11_C12, 
CM_OCSC_C11, mask_sh), \ - TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \ - TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \ - TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_BIAS_R, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_BIAS_G, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_BIAS_B, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_SCALE_R, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_SCALE_G, mask_sh), \ - TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_SCALE_B, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_RGAM_LUT_INDEX, CM_RGAM_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_B, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_G, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_R, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_B, CM_RGAM_RAMB_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_G, CM_RGAM_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_R, CM_RGAM_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, 
CM_RGAM_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_B, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_G, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_R, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_B, CM_RGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_G, CM_RGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_R, CM_RGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_B, mask_sh), \ - TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, mask_sh), \ - TF_SF(CM0_CM_CONTROL, CM_BYPASS_EN, mask_sh), \ - TF_SF(CM0_CM_IGAM_LUT_SEQ_COLOR, CM_IGAM_LUT_SEQ_COLOR, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh), \ - TF_SF(CM0_CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ - TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh) - -/* - * - DCN1 CM debug status register definition - - register :ID9_CM_STATUS do - implement_ref :cm - map to: :cmdebugind, at: j - width 32 - disclosure NEVER - - field :ID9_VUPDATE_CFG, [0], R - field :ID9_IGAM_LUT_MODE, [2..1], R - field :ID9_BNS_BYPASS, [3], R - field :ID9_ICSC_MODE, [5..4], R - field :ID9_DGAM_LUT_MODE, 
[8..6], R - field :ID9_HDR_BYPASS, [9], R - field :ID9_GAMUT_REMAP_MODE, [11..10], R - field :ID9_RGAM_LUT_MODE, [14..12], R - #1 free bit - field :ID9_OCSC_MODE, [18..16], R - field :ID9_DENORM_MODE, [21..19], R - field :ID9_ROUND_TRUNC_MODE, [25..22], R - field :ID9_DITHER_EN, [26], R - field :ID9_DITHER_MODE, [28..27], R - end -*/ - -#define TF_DEBUG_REG_LIST_SH_DCN10 \ - .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \ - .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16 - -#define TF_DEBUG_REG_LIST_MASK_DCN10 \ - .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \ - .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000 - -#define TF_REG_FIELD_LIST(type) \ - type EXT_OVERSCAN_LEFT; \ - type EXT_OVERSCAN_RIGHT; \ - type EXT_OVERSCAN_BOTTOM; \ - type EXT_OVERSCAN_TOP; \ - type OTG_H_BLANK_START; \ - type OTG_H_BLANK_END; \ - type OTG_V_BLANK_START; \ - type OTG_V_BLANK_END; \ - type PIXEL_DEPTH; \ - type PIXEL_EXPAN_MODE; \ - type PIXEL_REDUCE_MODE; \ - type DYNAMIC_PIXEL_DEPTH; \ - type DITHER_EN; \ - type INTERLEAVE_EN; \ - type LB_DATA_FORMAT__ALPHA_EN; \ - type MEMORY_CONFIG; \ - type LB_MAX_PARTITIONS; \ - type AUTOCAL_MODE; \ - type AUTOCAL_NUM_PIPE; \ - type AUTOCAL_PIPE_ID; \ - type SCL_BOUNDARY_MODE; \ - type SCL_BLACK_OFFSET_RGB_Y; \ - type SCL_BLACK_OFFSET_CBCR; \ - type SCL_V_NUM_TAPS; \ - type SCL_H_NUM_TAPS; \ - type SCL_V_NUM_TAPS_C; \ - type SCL_H_NUM_TAPS_C; \ - type SCL_COEF_RAM_TAP_PAIR_IDX; \ - type SCL_COEF_RAM_PHASE; \ - type SCL_COEF_RAM_FILTER_TYPE; \ - type SCL_COEF_RAM_EVEN_TAP_COEF; \ - type SCL_COEF_RAM_EVEN_TAP_COEF_EN; \ - type SCL_COEF_RAM_ODD_TAP_COEF; \ - type SCL_COEF_RAM_ODD_TAP_COEF_EN; \ - type SCL_H_2TAP_HARDCODE_COEF_EN; \ - type SCL_H_2TAP_SHARP_EN; \ - type SCL_H_2TAP_SHARP_FACTOR; \ - type SCL_V_2TAP_HARDCODE_COEF_EN; \ - type SCL_V_2TAP_SHARP_EN; \ - type SCL_V_2TAP_SHARP_FACTOR; \ - type SCL_COEF_RAM_SELECT; \ - type DSCL_MODE; \ - type RECOUT_START_X; \ - type RECOUT_START_Y; \ - type RECOUT_WIDTH; \ - type RECOUT_HEIGHT; \ - type MPC_WIDTH; \ - type MPC_HEIGHT; \ - type SCL_H_SCALE_RATIO; \ - type SCL_V_SCALE_RATIO; \ - type SCL_H_SCALE_RATIO_C; \ - type SCL_V_SCALE_RATIO_C; \ - type SCL_H_INIT_FRAC; \ - type SCL_H_INIT_INT; \ - type SCL_H_INIT_FRAC_C; \ - type SCL_H_INIT_INT_C; \ - type SCL_V_INIT_FRAC; \ - type SCL_V_INIT_INT; \ - type SCL_V_INIT_FRAC_BOT; \ - type SCL_V_INIT_INT_BOT; \ - type SCL_V_INIT_FRAC_C; \ - type SCL_V_INIT_INT_C; \ - type SCL_V_INIT_FRAC_BOT_C; \ - type SCL_V_INIT_INT_BOT_C; \ - type SCL_CHROMA_COEF_MODE; \ - type SCL_COEF_RAM_SELECT_CURRENT; \ - type LUT_MEM_PWR_FORCE; \ - type LUT_MEM_PWR_STATE; \ - type CM_GAMUT_REMAP_MODE; \ - type CM_GAMUT_REMAP_C11; \ - type CM_GAMUT_REMAP_C12; \ - type CM_GAMUT_REMAP_C13; \ - type CM_GAMUT_REMAP_C14; \ - type CM_GAMUT_REMAP_C21; \ - type CM_GAMUT_REMAP_C22; \ - type CM_GAMUT_REMAP_C23; \ - type CM_GAMUT_REMAP_C24; \ - type CM_GAMUT_REMAP_C31; \ - type CM_GAMUT_REMAP_C32; \ - type CM_GAMUT_REMAP_C33; \ - type CM_GAMUT_REMAP_C34; \ - type CM_COMA_C11; \ - type CM_COMA_C12; \ - type CM_COMA_C33; \ - type CM_COMA_C34; \ - type CM_COMB_C11; \ - type CM_COMB_C12; \ - type CM_COMB_C33; \ - type CM_COMB_C34; \ - type CM_OCSC_MODE; \ - type CM_OCSC_C11; \ - type CM_OCSC_C12; \ - type CM_OCSC_C33; \ - type CM_OCSC_C34; \ - type RGAM_MEM_PWR_FORCE; \ - type CM_RGAM_LUT_DATA; \ - type CM_RGAM_LUT_WRITE_EN_MASK; \ - type CM_RGAM_LUT_WRITE_SEL; \ - type CM_RGAM_LUT_INDEX; \ - type CM_RGAM_RAMB_EXP_REGION_START_B; \ - type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_RGAM_RAMB_EXP_REGION_START_G; \ - type 
CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_RGAM_RAMB_EXP_REGION_START_R; \ - type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_RGAM_RAMB_EXP_REGION_END_B; \ - type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; \ - type CM_RGAM_RAMB_EXP_REGION_END_BASE_B; \ - type CM_RGAM_RAMB_EXP_REGION_END_G; \ - type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G; \ - type CM_RGAM_RAMB_EXP_REGION_END_BASE_G; \ - type CM_RGAM_RAMB_EXP_REGION_END_R; \ - type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R; \ - type CM_RGAM_RAMB_EXP_REGION_END_BASE_R; \ - type CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ - type CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ - type CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION_START_B; \ - type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_RGAM_RAMA_EXP_REGION_START_G; \ - type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_RGAM_RAMA_EXP_REGION_START_R; \ - type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_RGAM_RAMA_EXP_REGION_END_B; \ - type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_RGAM_RAMA_EXP_REGION_END_BASE_B; \ - type CM_RGAM_RAMA_EXP_REGION_END_G; \ - type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G; \ - type CM_RGAM_RAMA_EXP_REGION_END_BASE_G; \ - type CM_RGAM_RAMA_EXP_REGION_END_R; \ - type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R; \ - type CM_RGAM_RAMA_EXP_REGION_END_BASE_R; \ - type CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ - type CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ - type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ - type CM_RGAM_LUT_MODE; \ - type CM_CMOUT_ROUND_TRUNC_MODE; \ - type CM_BLNDGAM_LUT_MODE; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R; \ - type CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS; \ - type 
CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ - type CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ - type 
CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET; \ 
- type CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ - type CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ - type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ - type CM_BLNDGAM_LUT_WRITE_EN_MASK; \ - type CM_BLNDGAM_LUT_WRITE_SEL; \ - type CM_BLNDGAM_CONFIG_STATUS; \ - type CM_BLNDGAM_LUT_INDEX; \ - type BLNDGAM_MEM_PWR_FORCE; \ - type CM_3DLUT_MODE; \ - type CM_3DLUT_SIZE; \ - type CM_3DLUT_INDEX; \ - type CM_3DLUT_DATA0; \ - type CM_3DLUT_DATA1; \ - type CM_3DLUT_DATA_30BIT; \ - type CM_3DLUT_WRITE_EN_MASK; \ - type CM_3DLUT_RAM_SEL; \ - type CM_3DLUT_30BIT_EN; \ - type CM_3DLUT_CONFIG_STATUS; \ - type CM_3DLUT_READ_SEL; \ - type CM_SHAPER_LUT_MODE; \ - type CM_SHAPER_RAMB_EXP_REGION_START_B; \ - type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_SHAPER_RAMB_EXP_REGION_START_G; \ - type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_SHAPER_RAMB_EXP_REGION_START_R; \ - type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_SHAPER_RAMB_EXP_REGION_END_B; \ - type CM_SHAPER_RAMB_EXP_REGION_END_BASE_B; \ - type CM_SHAPER_RAMB_EXP_REGION_END_G; \ - type CM_SHAPER_RAMB_EXP_REGION_END_BASE_G; \ - type CM_SHAPER_RAMB_EXP_REGION_END_R; \ - type CM_SHAPER_RAMB_EXP_REGION_END_BASE_R; \ - type CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET; \ - type 
CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS; \ - type CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET; \ - type CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION_START_B; \ - type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_SHAPER_RAMA_EXP_REGION_START_G; \ - type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_SHAPER_RAMA_EXP_REGION_START_R; \ - type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_SHAPER_RAMA_EXP_REGION_END_B; \ - type CM_SHAPER_RAMA_EXP_REGION_END_BASE_B; \ - type CM_SHAPER_RAMA_EXP_REGION_END_G; \ - type CM_SHAPER_RAMA_EXP_REGION_END_BASE_G; \ - type CM_SHAPER_RAMA_EXP_REGION_END_R; \ - type CM_SHAPER_RAMA_EXP_REGION_END_BASE_R; \ - type CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET; \ - type 
CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS; \ - type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \ - type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \ - type CM_SHAPER_LUT_WRITE_EN_MASK; \ - type CM_SHAPER_CONFIG_STATUS; \ - type CM_SHAPER_LUT_WRITE_SEL; \ - type CM_SHAPER_LUT_INDEX; \ - type CM_SHAPER_LUT_DATA; \ - type CM_DGAM_CONFIG_STATUS; \ - type CM_ICSC_MODE; \ - type CM_ICSC_C11; \ - type CM_ICSC_C12; \ - type CM_ICSC_C33; \ - type CM_ICSC_C34; \ - type CM_BNS_BIAS_R; \ - type CM_BNS_BIAS_G; \ - type CM_BNS_BIAS_B; \ - type CM_BNS_SCALE_R; \ - type CM_BNS_SCALE_G; \ - type CM_BNS_SCALE_B; \ - type CM_DGAM_RAMB_EXP_REGION_START_B; \ - type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ - type CM_DGAM_RAMB_EXP_REGION_START_G; \ - type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ - type CM_DGAM_RAMB_EXP_REGION_START_R; \ - type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ - type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_DGAM_RAMB_EXP_REGION_END_B; \ - type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; \ - type CM_DGAM_RAMB_EXP_REGION_END_BASE_B; \ - type CM_DGAM_RAMB_EXP_REGION_END_G; \ - type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G; \ - type CM_DGAM_RAMB_EXP_REGION_END_BASE_G; \ - type CM_DGAM_RAMB_EXP_REGION_END_R; \ - type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R; \ - type CM_DGAM_RAMB_EXP_REGION_END_BASE_R; \ - type CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ - type CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ - type CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ - type CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ - type CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION_START_B; \ - type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_DGAM_RAMA_EXP_REGION_START_G; \ - type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ - type CM_DGAM_RAMA_EXP_REGION_START_R; \ - type 
CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ - type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ - type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ - type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ - type CM_DGAM_RAMA_EXP_REGION_END_B; \ - type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_DGAM_RAMA_EXP_REGION_END_BASE_B; \ - type CM_DGAM_RAMA_EXP_REGION_END_G; \ - type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G; \ - type CM_DGAM_RAMA_EXP_REGION_END_BASE_G; \ - type CM_DGAM_RAMA_EXP_REGION_END_R; \ - type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R; \ - type CM_DGAM_RAMA_EXP_REGION_END_BASE_R; \ - type CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ - type CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ - type CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ - type SHARED_MEM_PWR_DIS; \ - type CM_IGAM_LUT_FORMAT_R; \ - type CM_IGAM_LUT_FORMAT_G; \ - type CM_IGAM_LUT_FORMAT_B; \ - type CM_IGAM_LUT_HOST_EN; \ - type CM_IGAM_LUT_RW_MODE; \ - type CM_IGAM_LUT_WRITE_EN_MASK; \ - type CM_IGAM_LUT_SEL; \ - type CM_IGAM_LUT_SEQ_COLOR; \ - type CM_IGAM_DGAM_CONFIG_STATUS; \ - type CM_DGAM_LUT_WRITE_EN_MASK; \ - type CM_DGAM_LUT_WRITE_SEL; \ - type CM_DGAM_LUT_INDEX; \ - type CM_DGAM_LUT_DATA; \ - type CM_DGAM_LUT_MODE; \ - type CM_IGAM_LUT_MODE; \ - type CM_IGAM_INPUT_FORMAT; \ - type CM_IGAM_LUT_RW_INDEX; \ - type CM_BYPASS_EN; \ - type FORMAT_EXPANSION_MODE; \ - type CNVC_BYPASS; \ - type OUTPUT_FP; \ - type CNVC_SURFACE_PIXEL_FORMAT; \ - type CURSOR_MODE; \ - type CURSOR_PITCH; \ - type CURSOR_LINES_PER_CHUNK; \ - type CURSOR_ENABLE; \ - type CUR0_MODE; \ - type CUR0_EXPANSION_MODE; \ - type CUR0_ENABLE; \ - type CM_BYPASS; \ - type CM_TEST_DEBUG_INDEX; \ - type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \ - type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\ - type FORMAT_CONTROL__ALPHA_EN; \ - type CUR0_COLOR0; \ - type CUR0_COLOR1; \ - type DPPCLK_RATE_CONTROL; \ - type DPP_CLOCK_ENABLE; \ - type CM_HDR_MULT_COEF; \ - type CUR0_FP_BIAS; \ - type CUR0_FP_SCALE; - -struct dcn_dpp_shift { - TF_REG_FIELD_LIST(uint8_t) -}; - -struct dcn_dpp_mask { - TF_REG_FIELD_LIST(uint32_t) -}; - -#define DPP_COMMON_REG_VARIABLE_LIST \ - uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT; \ - uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM; \ - uint32_t OTG_H_BLANK; \ - uint32_t OTG_V_BLANK; \ - uint32_t DSCL_MEM_PWR_CTRL; \ - uint32_t DSCL_MEM_PWR_STATUS; \ - uint32_t SCL_MODE; \ - uint32_t LB_DATA_FORMAT; \ - uint32_t LB_MEMORY_CTRL; \ - uint32_t DSCL_AUTOCAL; \ - uint32_t DSCL_CONTROL; \ - uint32_t SCL_BLACK_OFFSET; \ - uint32_t SCL_TAP_CONTROL; \ - uint32_t SCL_COEF_RAM_TAP_SELECT; \ - uint32_t SCL_COEF_RAM_TAP_DATA; \ - uint32_t DSCL_2TAP_CONTROL; \ - uint32_t MPC_SIZE; \ - uint32_t SCL_HORZ_FILTER_SCALE_RATIO; \ - uint32_t SCL_VERT_FILTER_SCALE_RATIO; \ - uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C; \ - uint32_t SCL_VERT_FILTER_SCALE_RATIO_C; \ - uint32_t SCL_HORZ_FILTER_INIT; \ - uint32_t SCL_HORZ_FILTER_INIT_C; \ - uint32_t SCL_VERT_FILTER_INIT; \ - uint32_t SCL_VERT_FILTER_INIT_BOT; \ - uint32_t SCL_VERT_FILTER_INIT_C; \ - uint32_t SCL_VERT_FILTER_INIT_BOT_C; \ - uint32_t RECOUT_START; \ - uint32_t RECOUT_SIZE; \ - uint32_t CM_GAMUT_REMAP_CONTROL; \ - uint32_t CM_GAMUT_REMAP_C11_C12; \ - uint32_t CM_GAMUT_REMAP_C13_C14; \ - uint32_t CM_GAMUT_REMAP_C21_C22; \ - uint32_t CM_GAMUT_REMAP_C23_C24; \ - uint32_t CM_GAMUT_REMAP_C31_C32; \ - uint32_t 
CM_GAMUT_REMAP_C33_C34; \ - uint32_t CM_COMA_C11_C12; \ - uint32_t CM_COMA_C33_C34; \ - uint32_t CM_COMB_C11_C12; \ - uint32_t CM_COMB_C33_C34; \ - uint32_t CM_OCSC_CONTROL; \ - uint32_t CM_OCSC_C11_C12; \ - uint32_t CM_OCSC_C33_C34; \ - uint32_t CM_MEM_PWR_CTRL; \ - uint32_t CM_RGAM_LUT_DATA; \ - uint32_t CM_RGAM_LUT_WRITE_EN_MASK; \ - uint32_t CM_RGAM_LUT_INDEX; \ - uint32_t CM_RGAM_RAMB_START_CNTL_B; \ - uint32_t CM_RGAM_RAMB_START_CNTL_G; \ - uint32_t CM_RGAM_RAMB_START_CNTL_R; \ - uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B; \ - uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G; \ - uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R; \ - uint32_t CM_RGAM_RAMB_END_CNTL1_B; \ - uint32_t CM_RGAM_RAMB_END_CNTL2_B; \ - uint32_t CM_RGAM_RAMB_END_CNTL1_G; \ - uint32_t CM_RGAM_RAMB_END_CNTL2_G; \ - uint32_t CM_RGAM_RAMB_END_CNTL1_R; \ - uint32_t CM_RGAM_RAMB_END_CNTL2_R; \ - uint32_t CM_RGAM_RAMB_REGION_0_1; \ - uint32_t CM_RGAM_RAMB_REGION_32_33; \ - uint32_t CM_RGAM_RAMA_START_CNTL_B; \ - uint32_t CM_RGAM_RAMA_START_CNTL_G; \ - uint32_t CM_RGAM_RAMA_START_CNTL_R; \ - uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B; \ - uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G; \ - uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R; \ - uint32_t CM_RGAM_RAMA_END_CNTL1_B; \ - uint32_t CM_RGAM_RAMA_END_CNTL2_B; \ - uint32_t CM_RGAM_RAMA_END_CNTL1_G; \ - uint32_t CM_RGAM_RAMA_END_CNTL2_G; \ - uint32_t CM_RGAM_RAMA_END_CNTL1_R; \ - uint32_t CM_RGAM_RAMA_END_CNTL2_R; \ - uint32_t CM_RGAM_RAMA_REGION_0_1; \ - uint32_t CM_RGAM_RAMA_REGION_32_33; \ - uint32_t CM_RGAM_CONTROL; \ - uint32_t CM_CMOUT_CONTROL; \ - uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK; \ - uint32_t CM_BLNDGAM_CONTROL; \ - uint32_t CM_BLNDGAM_RAMB_START_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMB_START_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMB_START_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R; \ - uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R; \ - uint32_t CM_BLNDGAM_RAMB_REGION_0_1; \ - uint32_t CM_BLNDGAM_RAMB_REGION_2_3; \ - uint32_t CM_BLNDGAM_RAMB_REGION_4_5; \ - uint32_t CM_BLNDGAM_RAMB_REGION_6_7; \ - uint32_t CM_BLNDGAM_RAMB_REGION_8_9; \ - uint32_t CM_BLNDGAM_RAMB_REGION_10_11; \ - uint32_t CM_BLNDGAM_RAMB_REGION_12_13; \ - uint32_t CM_BLNDGAM_RAMB_REGION_14_15; \ - uint32_t CM_BLNDGAM_RAMB_REGION_16_17; \ - uint32_t CM_BLNDGAM_RAMB_REGION_18_19; \ - uint32_t CM_BLNDGAM_RAMB_REGION_20_21; \ - uint32_t CM_BLNDGAM_RAMB_REGION_22_23; \ - uint32_t CM_BLNDGAM_RAMB_REGION_24_25; \ - uint32_t CM_BLNDGAM_RAMB_REGION_26_27; \ - uint32_t CM_BLNDGAM_RAMB_REGION_28_29; \ - uint32_t CM_BLNDGAM_RAMB_REGION_30_31; \ - uint32_t CM_BLNDGAM_RAMB_REGION_32_33; \ - uint32_t CM_BLNDGAM_RAMA_START_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMA_START_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMA_START_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R; \ - uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R; \ - uint32_t CM_BLNDGAM_RAMA_REGION_0_1; \ - uint32_t CM_BLNDGAM_RAMA_REGION_2_3; \ - uint32_t CM_BLNDGAM_RAMA_REGION_4_5; \ - uint32_t CM_BLNDGAM_RAMA_REGION_6_7; \ - uint32_t CM_BLNDGAM_RAMA_REGION_8_9; 
\ - uint32_t CM_BLNDGAM_RAMA_REGION_10_11; \ - uint32_t CM_BLNDGAM_RAMA_REGION_12_13; \ - uint32_t CM_BLNDGAM_RAMA_REGION_14_15; \ - uint32_t CM_BLNDGAM_RAMA_REGION_16_17; \ - uint32_t CM_BLNDGAM_RAMA_REGION_18_19; \ - uint32_t CM_BLNDGAM_RAMA_REGION_20_21; \ - uint32_t CM_BLNDGAM_RAMA_REGION_22_23; \ - uint32_t CM_BLNDGAM_RAMA_REGION_24_25; \ - uint32_t CM_BLNDGAM_RAMA_REGION_26_27; \ - uint32_t CM_BLNDGAM_RAMA_REGION_28_29; \ - uint32_t CM_BLNDGAM_RAMA_REGION_30_31; \ - uint32_t CM_BLNDGAM_RAMA_REGION_32_33; \ - uint32_t CM_BLNDGAM_LUT_INDEX; \ - uint32_t CM_3DLUT_MODE; \ - uint32_t CM_3DLUT_INDEX; \ - uint32_t CM_3DLUT_DATA; \ - uint32_t CM_3DLUT_DATA_30BIT; \ - uint32_t CM_3DLUT_READ_WRITE_CONTROL; \ - uint32_t CM_SHAPER_LUT_WRITE_EN_MASK; \ - uint32_t CM_SHAPER_CONTROL; \ - uint32_t CM_SHAPER_RAMB_START_CNTL_B; \ - uint32_t CM_SHAPER_RAMB_START_CNTL_G; \ - uint32_t CM_SHAPER_RAMB_START_CNTL_R; \ - uint32_t CM_SHAPER_RAMB_END_CNTL_B; \ - uint32_t CM_SHAPER_RAMB_END_CNTL_G; \ - uint32_t CM_SHAPER_RAMB_END_CNTL_R; \ - uint32_t CM_SHAPER_RAMB_REGION_0_1; \ - uint32_t CM_SHAPER_RAMB_REGION_2_3; \ - uint32_t CM_SHAPER_RAMB_REGION_4_5; \ - uint32_t CM_SHAPER_RAMB_REGION_6_7; \ - uint32_t CM_SHAPER_RAMB_REGION_8_9; \ - uint32_t CM_SHAPER_RAMB_REGION_10_11; \ - uint32_t CM_SHAPER_RAMB_REGION_12_13; \ - uint32_t CM_SHAPER_RAMB_REGION_14_15; \ - uint32_t CM_SHAPER_RAMB_REGION_16_17; \ - uint32_t CM_SHAPER_RAMB_REGION_18_19; \ - uint32_t CM_SHAPER_RAMB_REGION_20_21; \ - uint32_t CM_SHAPER_RAMB_REGION_22_23; \ - uint32_t CM_SHAPER_RAMB_REGION_24_25; \ - uint32_t CM_SHAPER_RAMB_REGION_26_27; \ - uint32_t CM_SHAPER_RAMB_REGION_28_29; \ - uint32_t CM_SHAPER_RAMB_REGION_30_31; \ - uint32_t CM_SHAPER_RAMB_REGION_32_33; \ - uint32_t CM_SHAPER_RAMA_START_CNTL_B; \ - uint32_t CM_SHAPER_RAMA_START_CNTL_G; \ - uint32_t CM_SHAPER_RAMA_START_CNTL_R; \ - uint32_t CM_SHAPER_RAMA_END_CNTL_B; \ - uint32_t CM_SHAPER_RAMA_END_CNTL_G; \ - uint32_t CM_SHAPER_RAMA_END_CNTL_R; \ - uint32_t CM_SHAPER_RAMA_REGION_0_1; \ - uint32_t CM_SHAPER_RAMA_REGION_2_3; \ - uint32_t CM_SHAPER_RAMA_REGION_4_5; \ - uint32_t CM_SHAPER_RAMA_REGION_6_7; \ - uint32_t CM_SHAPER_RAMA_REGION_8_9; \ - uint32_t CM_SHAPER_RAMA_REGION_10_11; \ - uint32_t CM_SHAPER_RAMA_REGION_12_13; \ - uint32_t CM_SHAPER_RAMA_REGION_14_15; \ - uint32_t CM_SHAPER_RAMA_REGION_16_17; \ - uint32_t CM_SHAPER_RAMA_REGION_18_19; \ - uint32_t CM_SHAPER_RAMA_REGION_20_21; \ - uint32_t CM_SHAPER_RAMA_REGION_22_23; \ - uint32_t CM_SHAPER_RAMA_REGION_24_25; \ - uint32_t CM_SHAPER_RAMA_REGION_26_27; \ - uint32_t CM_SHAPER_RAMA_REGION_28_29; \ - uint32_t CM_SHAPER_RAMA_REGION_30_31; \ - uint32_t CM_SHAPER_RAMA_REGION_32_33; \ - uint32_t CM_SHAPER_LUT_INDEX; \ - uint32_t CM_SHAPER_LUT_DATA; \ - uint32_t CM_ICSC_CONTROL; \ - uint32_t CM_ICSC_C11_C12; \ - uint32_t CM_ICSC_C33_C34; \ - uint32_t CM_BNS_VALUES_R; \ - uint32_t CM_BNS_VALUES_G; \ - uint32_t CM_BNS_VALUES_B; \ - uint32_t CM_DGAM_RAMB_START_CNTL_B; \ - uint32_t CM_DGAM_RAMB_START_CNTL_G; \ - uint32_t CM_DGAM_RAMB_START_CNTL_R; \ - uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B; \ - uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G; \ - uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R; \ - uint32_t CM_DGAM_RAMB_END_CNTL1_B; \ - uint32_t CM_DGAM_RAMB_END_CNTL2_B; \ - uint32_t CM_DGAM_RAMB_END_CNTL1_G; \ - uint32_t CM_DGAM_RAMB_END_CNTL2_G; \ - uint32_t CM_DGAM_RAMB_END_CNTL1_R; \ - uint32_t CM_DGAM_RAMB_END_CNTL2_R; \ - uint32_t CM_DGAM_RAMB_REGION_0_1; \ - uint32_t CM_DGAM_RAMB_REGION_14_15; \ - uint32_t CM_DGAM_RAMA_START_CNTL_B; \ - uint32_t 
CM_DGAM_RAMA_START_CNTL_G; \ - uint32_t CM_DGAM_RAMA_START_CNTL_R; \ - uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B; \ - uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G; \ - uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R; \ - uint32_t CM_DGAM_RAMA_END_CNTL1_B; \ - uint32_t CM_DGAM_RAMA_END_CNTL2_B; \ - uint32_t CM_DGAM_RAMA_END_CNTL1_G; \ - uint32_t CM_DGAM_RAMA_END_CNTL2_G; \ - uint32_t CM_DGAM_RAMA_END_CNTL1_R; \ - uint32_t CM_DGAM_RAMA_END_CNTL2_R; \ - uint32_t CM_DGAM_RAMA_REGION_0_1; \ - uint32_t CM_DGAM_RAMA_REGION_14_15; \ - uint32_t CM_DGAM_LUT_WRITE_EN_MASK; \ - uint32_t CM_DGAM_LUT_INDEX; \ - uint32_t CM_DGAM_LUT_DATA; \ - uint32_t CM_CONTROL; \ - uint32_t CM_DGAM_CONTROL; \ - uint32_t CM_IGAM_CONTROL; \ - uint32_t CM_IGAM_LUT_RW_CONTROL; \ - uint32_t CM_IGAM_LUT_RW_INDEX; \ - uint32_t CM_IGAM_LUT_SEQ_COLOR; \ - uint32_t CM_TEST_DEBUG_INDEX; \ - uint32_t CM_TEST_DEBUG_DATA; \ - uint32_t FORMAT_CONTROL; \ - uint32_t CNVC_SURFACE_PIXEL_FORMAT; \ - uint32_t CURSOR_CONTROL; \ - uint32_t CURSOR0_CONTROL; \ - uint32_t CURSOR0_COLOR0; \ - uint32_t CURSOR0_COLOR1; \ - uint32_t DPP_CONTROL; \ - uint32_t CM_HDR_MULT_COEF; \ - uint32_t CURSOR0_FP_SCALE_BIAS; - -struct dcn_dpp_registers { - DPP_COMMON_REG_VARIABLE_LIST -}; - -struct dcn10_dpp { - struct dpp base; - - const struct dcn_dpp_registers *tf_regs; - const struct dcn_dpp_shift *tf_shift; - const struct dcn_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -enum dcn10_input_csc_select { - INPUT_CSC_SELECT_BYPASS = 0, - INPUT_CSC_SELECT_ICSC = 1, - INPUT_CSC_SELECT_COMA = 2 -}; - -void dpp1_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes); - -void dpp1_set_cursor_position( - struct dpp *dpp_base, - const struct dc_cursor_position *pos, - const struct dc_cursor_mi_param *param, - uint32_t width, - uint32_t height); - -void dpp1_cnv_set_optional_cursor_attributes( - struct dpp *dpp_base, - struct dpp_cursor_attributes *attr); - -bool dpp1_dscl_is_lb_conf_valid( - int ceil_vratio, - int num_partitions, - int vtaps); - -void dpp1_dscl_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c); - -void dpp1_degamma_ram_select( - struct dpp *dpp_base, - bool use_ram_a); - -void dpp1_program_degamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params); - -void dpp1_program_degamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params); - -void dpp1_program_degamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a); - -void dpp1_power_on_degamma_lut( - struct dpp *dpp_base, - bool power_on); - -void dpp1_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select select, - const struct out_csc_color_matrix *tbl_entry); - -void dpp1_program_bias_and_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *params); - -void dpp1_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma); - -void dpp1_full_bypass(struct dpp *dpp_base); - -void dpp1_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode); - -void dpp1_set_degamma_pwl(struct dpp *dpp_base, - const struct pwl_params *params); - - -void dpp_read_state(struct dpp *dpp_base, - 
struct dcn_dpp_state *s); - -void dpp_reset(struct dpp *dpp_base); - -void dpp1_cm_program_regamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num); - -void dpp1_cm_power_on_regamma_lut( - struct dpp *dpp_base, - bool power_on); - -void dpp1_cm_configure_regamma_lut( - struct dpp *dpp_base, - bool is_ram_a); - -/*program re gamma RAM A*/ -void dpp1_cm_program_regamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params); - -/*program re gamma RAM B*/ -void dpp1_cm_program_regamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params); -void dpp1_cm_set_output_csc_adjustment( - struct dpp *dpp_base, - const uint16_t *regval); - -void dpp1_cm_set_output_csc_default( - struct dpp *dpp_base, - enum dc_color_space colorspace); - -void dpp1_cm_set_gamut_remap( - struct dpp *dpp, - const struct dpp_grph_csc_adjustment *adjust); - -void dpp1_dscl_set_scaler_manual_scale( - struct dpp *dpp_base, - const struct scaler_data *scl_data); - -void dpp1_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut); - -void dpp1_dppclk_control( - struct dpp *dpp_base, - bool dppclk_div, - bool enable); - -void dpp1_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier); - -bool dpp1_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - -void dpp1_construct(struct dcn10_dpp *dpp1, - struct dc_context *ctx, - uint32_t inst, - const struct dcn_dpp_registers *tf_regs, - const struct dcn_dpp_shift *tf_shift, - const struct dcn_dpp_mask *tf_mask); - -void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust); -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c deleted file mode 100644 index 2f994a3a0b9c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ /dev/null @@ -1,884 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn10_dpp.h" -#include "basics/conversion.h" -#include "dcn10_cm_common.h" - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) - - -enum dcn10_coef_filter_type_sel { - SCL_COEF_LUMA_VERT_FILTER = 0, - SCL_COEF_LUMA_HORZ_FILTER = 1, - SCL_COEF_CHROMA_VERT_FILTER = 2, - SCL_COEF_CHROMA_HORZ_FILTER = 3, - SCL_COEF_ALPHA_VERT_FILTER = 4, - SCL_COEF_ALPHA_HORZ_FILTER = 5 -}; - -enum dscl_autocal_mode { - AUTOCAL_MODE_OFF = 0, - - /* Autocal calculate the scaling ratio and initial phase and the - * DSCL_MODE_SEL must be set to 1 - */ - AUTOCAL_MODE_AUTOSCALE = 1, - /* Autocal perform auto centering without replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOCENTER = 2, - /* Autocal perform auto centering and auto replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOREPLICATE = 3 -}; - -enum dscl_mode_sel { - DSCL_MODE_SCALING_444_BYPASS = 0, - DSCL_MODE_SCALING_444_RGB_ENABLE = 1, - DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, - DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, - DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, - DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, - DSCL_MODE_DSCL_BYPASS = 6 -}; - -static void program_gamut_remap( - struct dcn10_dpp *dpp, - const uint16_t *regval, - enum gamut_remap_select select) -{ - uint16_t selection = 0; - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == GAMUT_REMAP_BYPASS) { - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); - return; - } - switch (select) { - case GAMUT_REMAP_COEFF: - selection = 1; - break; - case GAMUT_REMAP_COMA_COEFF: - selection = 2; - break; - case GAMUT_REMAP_COMB_COEFF: - selection = 3; - break; - default: - break; - } - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - - if (select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - } - - REG_SET( - CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, selection); - -} - -void dpp1_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - int i = 0; - - if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) - /* Bypass if type is bypass or hw */ - program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); - else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - - for (i = 0; i < 12; i++) - arr_matrix[i] = 
adjust->temperature_matrix[i]; - - convert_float_matrix( - arr_reg_val, arr_matrix, 12); - - program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF); - } -} - -static void read_gamut_remap(struct dcn10_dpp *dpp, - uint16_t *regval, - enum gamut_remap_select *select) -{ - struct color_matrices_reg gam_regs; - uint32_t selection; - - REG_GET(CM_GAMUT_REMAP_CONTROL, - CM_GAMUT_REMAP_MODE, &selection); - - *select = selection; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (*select == GAMUT_REMAP_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_read_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); - - cm_helper_read_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == GAMUT_REMAP_COMB_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); - - cm_helper_read_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - } -} - -void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - uint16_t arr_reg_val[12]; - enum gamut_remap_select select; - - read_gamut_remap(dpp, arr_reg_val, &select); - - if (select == GAMUT_REMAP_BYPASS) { - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - return; - } - - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - convert_hw_matrix(adjust->temperature_matrix, - arr_reg_val, ARRAY_SIZE(arr_reg_val)); -} - -static void dpp1_cm_program_color_matrix( - struct dcn10_dpp *dpp, - const uint16_t *regval) -{ - uint32_t ocsc_mode; - uint32_t cur_mode; - struct color_matrices_reg gam_regs; - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - /* determine which CSC matrix (ocsc or comb) we are using - * currently. 
select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - REG_SET(CM_TEST_DEBUG_INDEX, 0, - CM_TEST_DEBUG_INDEX, 9); - - REG_GET(CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode); - - if (cur_mode != 4) - ocsc_mode = 4; - else - ocsc_mode = 5; - - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12; - - if (ocsc_mode == 4) { - - gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); - -} - -void dpp1_cm_set_output_csc_default( - struct dpp *dpp_base, - enum dc_color_space colorspace) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - const uint16_t *regval = NULL; - int arr_size; - - regval = find_color_matrix(colorspace, &arr_size); - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - - dpp1_cm_program_color_matrix(dpp, regval); -} - -static void dpp1_cm_get_reg_field( - struct dcn10_dpp *dpp, - struct xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; -} - -static void dpp1_cm_get_degamma_reg_field( - struct dcn10_dpp *dpp, - struct xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; -} -void dpp1_cm_set_output_csc_adjustment( - struct dpp *dpp_base, - const uint16_t *regval) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - dpp1_cm_program_color_matrix(dpp, regval); -} - -void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base, - bool power_on) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SET(CM_MEM_PWR_CTRL, 0, - RGAM_MEM_PWR_FORCE, power_on == true ? 0:1); - -} - -void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SEQ_START(); - - for (i = 0 ; i < num; i++) { - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg); - - } - -} - -void dpp1_cm_configure_regamma_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, - CM_RGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, - CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); - REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0); -} - -/*program re gamma RAM A*/ -void dpp1_cm_program_regamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); - -} - -/*program re gamma RAM B*/ -void dpp1_cm_program_regamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -void dpp1_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - int i; - int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); - const uint16_t *regval = NULL; - uint32_t cur_select = 0; - enum dcn10_input_csc_select select; - struct color_matrices_reg gam_regs; - - if (input_select == INPUT_CSC_SELECT_BYPASS) { - REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); - return; - } - - if (tbl_entry == NULL) { - for (i = 0; i < arr_size; i++) - if (dpp_input_csc_matrix[i].color_space == color_space) { - regval = dpp_input_csc_matrix[i].regval; - break; - } - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - } else { - regval = tbl_entry->regval; - } - - /* determine which CSC matrix (icsc or coma) we are using - * currently. 
select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - REG_SET(CM_TEST_DEBUG_INDEX, 0, - CM_TEST_DEBUG_INDEX, 9); - - REG_GET(CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select); - - if (cur_select != INPUT_CSC_SELECT_ICSC) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_COMA; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; - - if (select == INPUT_CSC_SELECT_ICSC) { - - gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET(CM_ICSC_CONTROL, 0, - CM_ICSC_MODE, select); -} - -//keep here for now, decide multi dce support later -void dpp1_program_bias_and_scale( - struct dpp *dpp_base, - struct dc_bias_and_scale *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SET_2(CM_BNS_VALUES_R, 0, - CM_BNS_SCALE_R, params->scale_red, - CM_BNS_BIAS_R, params->bias_red); - - REG_SET_2(CM_BNS_VALUES_G, 0, - CM_BNS_SCALE_G, params->scale_green, - CM_BNS_BIAS_G, params->bias_green); - - REG_SET_2(CM_BNS_VALUES_B, 0, - CM_BNS_SCALE_B, params->scale_blue, - CM_BNS_BIAS_B, params->bias_blue); - -} - -/*program de gamma RAM B*/ -void dpp1_program_degamma_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15); - - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -/*program de gamma RAM A*/ -void dpp1_program_degamma_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G); - 
gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -void dpp1_power_on_degamma_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_SET(CM_MEM_PWR_CTRL, 0, - SHARED_MEM_PWR_DIS, power_on ? 0:1); - -} - -static void dpp1_enable_cm_block( - struct dpp *dpp_base) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8); - REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0); -} - -void dpp1_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - dpp1_enable_cm_block(dpp_base); - - switch (mode) { - case IPP_DEGAMMA_MODE_BYPASS: - /* Setting de gamma bypass for now */ - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); - break; - case IPP_DEGAMMA_MODE_HW_sRGB: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); - break; - case IPP_DEGAMMA_MODE_HW_xvYCC: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); - break; - case IPP_DEGAMMA_MODE_USER_PWL: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); - break; - default: - BREAK_TO_DEBUGGER(); - break; - } - - REG_SEQ_SUBMIT(); - REG_SEQ_WAIT_DONE(); -} - -void dpp1_degamma_ram_select( - struct dpp *dpp_base, - bool use_ram_a) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (use_ram_a) - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); - else - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4); - -} - -static bool dpp1_degamma_ram_inuse( - struct dpp *dpp_base, - bool *ram_a_inuse) -{ - bool ret = false; - uint32_t status_reg = 0; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, - &status_reg); - - if (status_reg == 9) { - *ram_a_inuse = true; - ret = true; - } else if (status_reg == 10) { - *ram_a_inuse = false; - ret = true; - } - return ret; -} - -void dpp1_program_degamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a) -{ - uint32_t i; - - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, - CM_DGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, - is_ram_a == true ? 
0:1); - - REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); - for (i = 0 ; i < num; i++) { - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); - } -} - -void dpp1_set_degamma_pwl(struct dpp *dpp_base, - const struct pwl_params *params) -{ - bool is_ram_a = true; - - dpp1_power_on_degamma_lut(dpp_base, true); - dpp1_enable_cm_block(dpp_base); - dpp1_degamma_ram_inuse(dpp_base, &is_ram_a); - if (is_ram_a == true) - dpp1_program_degamma_lutb_settings(dpp_base, params); - else - dpp1_program_degamma_luta_settings(dpp_base, params); - - dpp1_program_degamma_lut(dpp_base, params->rgb_resulted, - params->hw_points_num, !is_ram_a); - dpp1_degamma_ram_select(dpp_base, !is_ram_a); -} - -void dpp1_full_bypass(struct dpp *dpp_base) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - /* Input pixel format: ARGB8888 */ - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, 0x8); - - /* Zero expansion */ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_CONTROL__ALPHA_EN, 0, - FORMAT_EXPANSION_MODE, 0); - - /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ - if (dpp->tf_mask->CM_BYPASS_EN) - REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); - else - REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); - - /* Setting degamma bypass for now */ - REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); -} - -static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base, - bool *ram_a_inuse) -{ - bool in_use = false; - uint32_t status_reg = 0; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, - &status_reg); - - // 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB - if (status_reg == 1 || status_reg == 3 || status_reg == 4) { - *ram_a_inuse = true; - in_use = true; - // 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB - } else if (status_reg == 2 || status_reg == 5 || status_reg == 6) { - *ram_a_inuse = false; - in_use = true; - } - return in_use; -} - -/* - * Input gamma LUT currently supports 256 values only. This means input color - * can have a maximum of 8 bits per channel (= 256 possible values) in order to - * have a one-to-one mapping with the LUT. Truncation will occur with color - * values greater than 8 bits. - * - * In the future, this function should support additional input gamma methods, - * such as piecewise linear mapping, and input gamma bypass. - */ -void dpp1_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma) -{ - int i; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - bool rama_occupied = false; - uint32_t ram_num; - // Power on LUT memory. - REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1); - dpp1_enable_cm_block(dpp_base); - // Determine whether to use RAM A or RAM B - dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied); - if (!rama_occupied) - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0); - else - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1); - // RW mode is 256-entry LUT - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0); - // IGAM Input format should be 8 bits per channel. 
- REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0); - // Do not mask any R,G,B values - REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7); - // LUT-256, unsigned, integer, new u0.12 format - REG_UPDATE_3( - CM_IGAM_CONTROL, - CM_IGAM_LUT_FORMAT_R, 3, - CM_IGAM_LUT_FORMAT_G, 3, - CM_IGAM_LUT_FORMAT_B, 3); - // Start at index 0 of IGAM LUT - REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0); - for (i = 0; i < gamma->num_entries; i++) { - REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dc_fixpt_round( - gamma->entries.red[i])); - REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dc_fixpt_round( - gamma->entries.green[i])); - REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dc_fixpt_round( - gamma->entries.blue[i])); - } - // Power off LUT memory - REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0); - // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB - REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); - REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); -} - -void dpp1_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c deleted file mode 100644 index 5ca9ab8a76e8..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ /dev/null @@ -1,696 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn10_dpp.h" -#include "basics/conversion.h" - - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -enum dcn10_coef_filter_type_sel { - SCL_COEF_LUMA_VERT_FILTER = 0, - SCL_COEF_LUMA_HORZ_FILTER = 1, - SCL_COEF_CHROMA_VERT_FILTER = 2, - SCL_COEF_CHROMA_HORZ_FILTER = 3, - SCL_COEF_ALPHA_VERT_FILTER = 4, - SCL_COEF_ALPHA_HORZ_FILTER = 5 -}; - -enum dscl_autocal_mode { - AUTOCAL_MODE_OFF = 0, - - /* Autocal calculate the scaling ratio and initial phase and the - * DSCL_MODE_SEL must be set to 1 - */ - AUTOCAL_MODE_AUTOSCALE = 1, - /* Autocal perform auto centering without replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOCENTER = 2, - /* Autocal perform auto centering and auto replication and the - * DSCL_MODE_SEL must be set to 0 - */ - AUTOCAL_MODE_AUTOREPLICATE = 3 -}; - -enum dscl_mode_sel { - DSCL_MODE_SCALING_444_BYPASS = 0, - DSCL_MODE_SCALING_444_RGB_ENABLE = 1, - DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, - DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, - DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, - DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, - DSCL_MODE_DSCL_BYPASS = 6 -}; - -static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth) -{ - if (depth == LB_PIXEL_DEPTH_30BPP) - return 0; /* 10 bpc */ - else if (depth == LB_PIXEL_DEPTH_24BPP) - return 1; /* 8 bpc */ - else if (depth == LB_PIXEL_DEPTH_18BPP) - return 2; /* 6 bpc */ - else if (depth == LB_PIXEL_DEPTH_36BPP) - return 3; /* 12 bpc */ - else { - ASSERT(0); - return -1; /* Unsupported */ - } -} - -static bool dpp1_dscl_is_video_format(enum pixel_format format) -{ - if (format >= PIXEL_FORMAT_VIDEO_BEGIN - && format <= PIXEL_FORMAT_VIDEO_END) - return true; - else - return false; -} - -static bool dpp1_dscl_is_420_format(enum pixel_format format) -{ - if (format == PIXEL_FORMAT_420BPP8 || - format == PIXEL_FORMAT_420BPP10) - return true; - else - return false; -} - -static enum dscl_mode_sel dpp1_dscl_get_dscl_mode( - struct dpp *dpp_base, - const struct scaler_data *data, - bool dbg_always_scale) -{ - const long long one = dc_fixpt_one.value; - - if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { - /* DSCL is processing data in fixed format */ - if (data->format == PIXEL_FORMAT_FP16) - return DSCL_MODE_DSCL_BYPASS; - } - - if (data->ratios.horz.value == one - && data->ratios.vert.value == one - && data->ratios.horz_c.value == one - && data->ratios.vert_c.value == one - && !dbg_always_scale) - return DSCL_MODE_SCALING_444_BYPASS; - - if (!dpp1_dscl_is_420_format(data->format)) { - if (dpp1_dscl_is_video_format(data->format)) - return DSCL_MODE_SCALING_444_YCBCR_ENABLE; - else - return DSCL_MODE_SCALING_444_RGB_ENABLE; - } - if (data->ratios.horz.value == one && data->ratios.vert.value == one) - return DSCL_MODE_SCALING_420_LUMA_BYPASS; - if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) - return DSCL_MODE_SCALING_420_CHROMA_BYPASS; - - return DSCL_MODE_SCALING_420_YCBCR_ENABLE; -} - -static void dpp1_power_on_dscl( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - - if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) { - if (power_on) 
{ - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0); - REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5); - } else { - if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) { - dpp->base.ctx->dc->optimized_required = true; - dpp->base.deferred_reg_writes.bits.disable_dscl = true; - } else { - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); - } - } - } -} - - -static void dpp1_dscl_set_lb( - struct dcn10_dpp *dpp, - const struct line_buffer_params *lb_params, - enum lb_memory_config mem_size_config) -{ - uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */ - - /* LB */ - if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { - /* DSCL caps: pixel data processed in fixed format */ - uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth); - uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth; - - REG_SET_7(LB_DATA_FORMAT, 0, - PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */ - PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */ - PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */ - DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */ - DITHER_EN, 0, /* Dithering enable: Disabled */ - INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ - LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ - } else { - /* DSCL caps: pixel data processed in float format */ - REG_SET_2(LB_DATA_FORMAT, 0, - INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ - LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ - } - - if (dpp->base.caps->max_lb_partitions == 31) - max_partitions = 31; - - REG_SET_2(LB_MEMORY_CTRL, 0, - MEMORY_CONFIG, mem_size_config, - LB_MAX_PARTITIONS, max_partitions); -} - -static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio) -{ - if (taps == 8) - return get_filter_8tap_64p(ratio); - else if (taps == 7) - return get_filter_7tap_64p(ratio); - else if (taps == 6) - return get_filter_6tap_64p(ratio); - else if (taps == 5) - return get_filter_5tap_64p(ratio); - else if (taps == 4) - return get_filter_4tap_64p(ratio); - else if (taps == 3) - return get_filter_3tap_64p(ratio); - else if (taps == 2) - return get_filter_2tap_64p(); - else if (taps == 1) - return NULL; - else { - /* should never happen, bug */ - BREAK_TO_DEBUGGER(); - return NULL; - } -} - -static void dpp1_dscl_set_scaler_filter( - struct dcn10_dpp *dpp, - uint32_t taps, - enum dcn10_coef_filter_type_sel filter_type, - const uint16_t *filter) -{ - const int tap_pairs = (taps + 1) / 2; - int phase; - int pair; - uint16_t odd_coef, even_coef; - - REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0, - SCL_COEF_RAM_TAP_PAIR_IDX, 0, - SCL_COEF_RAM_PHASE, 0, - SCL_COEF_RAM_FILTER_TYPE, filter_type); - - for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) { - for (pair = 0; pair < tap_pairs; pair++) { - even_coef = filter[phase * taps + 2 * pair]; - if ((pair * 2 + 1) < taps) - odd_coef = filter[phase * taps + 2 * pair + 1]; - else - odd_coef = 0; - - REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0, - /* Even tap coefficient (bits 1:0 fixed to 0) */ - SCL_COEF_RAM_EVEN_TAP_COEF, even_coef, - /* Write/read control for even coefficient */ - SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1, - /* Odd tap coefficient (bits 1:0 fixed to 0) */ - SCL_COEF_RAM_ODD_TAP_COEF, odd_coef, - /* Write/read control for odd coefficient */ - SCL_COEF_RAM_ODD_TAP_COEF_EN, 1); - } - } - -} - -static void dpp1_dscl_set_scl_filter( - struct 
dcn10_dpp *dpp, - const struct scaler_data *scl_data, - bool chroma_coef_mode) -{ - bool h_2tap_hardcode_coef_en = false; - bool v_2tap_hardcode_coef_en = false; - bool h_2tap_sharp_en = false; - bool v_2tap_sharp_en = false; - uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz; - uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert; - bool coef_ram_current; - const uint16_t *filter_h = NULL; - const uint16_t *filter_v = NULL; - const uint16_t *filter_h_c = NULL; - const uint16_t *filter_v_c = NULL; - - h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3 - && scl_data->taps.h_taps_c < 3 - && (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1); - v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3 - && scl_data->taps.v_taps_c < 3 - && (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1); - - h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0; - v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0; - - REG_UPDATE_6(DSCL_2TAP_CONTROL, - SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en, - SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en, - SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor, - SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en, - SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en, - SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor); - - if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) { - bool filter_updated = false; - - filter_h = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.h_taps, scl_data->ratios.horz); - filter_v = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.v_taps, scl_data->ratios.vert); - - filter_updated = (filter_h && (filter_h != dpp->filter_h)) - || (filter_v && (filter_v != dpp->filter_v)); - - if (chroma_coef_mode) { - filter_h_c = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.h_taps_c, scl_data->ratios.horz_c); - filter_v_c = dpp1_dscl_get_filter_coeffs_64p( - scl_data->taps.v_taps_c, scl_data->ratios.vert_c); - filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c)) - || (filter_v_c && (filter_v_c != dpp->filter_v_c)); - } - - if (filter_updated) { - uint32_t scl_mode = REG_READ(SCL_MODE); - - if (!h_2tap_hardcode_coef_en && filter_h) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.h_taps, - SCL_COEF_LUMA_HORZ_FILTER, filter_h); - } - dpp->filter_h = filter_h; - if (!v_2tap_hardcode_coef_en && filter_v) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.v_taps, - SCL_COEF_LUMA_VERT_FILTER, filter_v); - } - dpp->filter_v = filter_v; - if (chroma_coef_mode) { - if (!h_2tap_hardcode_coef_en && filter_h_c) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.h_taps_c, - SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c); - } - if (!v_2tap_hardcode_coef_en && filter_v_c) { - dpp1_dscl_set_scaler_filter( - dpp, scl_data->taps.v_taps_c, - SCL_COEF_CHROMA_VERT_FILTER, filter_v_c); - } - } - dpp->filter_h_c = filter_h_c; - dpp->filter_v_c = filter_v_c; - - coef_ram_current = get_reg_field_value_ex( - scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT, - dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT); - - /* Swap coefficient RAM and set chroma coefficient mode */ - REG_SET_2(SCL_MODE, scl_mode, - SCL_COEF_RAM_SELECT, !coef_ram_current, - SCL_CHROMA_COEF_MODE, chroma_coef_mode); - } - } -} - -static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth) -{ - if (depth == LB_PIXEL_DEPTH_30BPP) - return 10; - else if (depth == LB_PIXEL_DEPTH_24BPP) - return 8; - else if (depth == LB_PIXEL_DEPTH_18BPP) - return 6; - else if (depth == LB_PIXEL_DEPTH_36BPP) - return 12; - else { - 
BREAK_TO_DEBUGGER(); - return -1; /* Unsupported */ - } -} - -void dpp1_dscl_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c) -{ - int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a, - lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a; - - int line_size = scl_data->viewport.width < scl_data->recout.width ? - scl_data->viewport.width : scl_data->recout.width; - int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? - scl_data->viewport_c.width : scl_data->recout.width; - - if (line_size == 0) - line_size = 1; - - if (line_size_c == 0) - line_size_c = 1; - - - lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); - memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ - memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ - memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - - if (lb_config == LB_MEMORY_CONFIG_1) { - lb_memory_size = 816; - lb_memory_size_c = 816; - lb_memory_size_a = 984; - } else if (lb_config == LB_MEMORY_CONFIG_2) { - lb_memory_size = 1088; - lb_memory_size_c = 1088; - lb_memory_size_a = 1312; - } else if (lb_config == LB_MEMORY_CONFIG_3) { - /* 420 mode: using 3rd mem from Y, Cr and Cb */ - lb_memory_size = 816 + 1088 + 848 + 848 + 848; - lb_memory_size_c = 816 + 1088; - lb_memory_size_a = 984 + 1312 + 456; - } else { - lb_memory_size = 816 + 1088 + 848; - lb_memory_size_c = 816 + 1088 + 848; - lb_memory_size_a = 984 + 1312 + 456; - } - *num_part_y = lb_memory_size / memory_line_size_y; - *num_part_c = lb_memory_size_c / memory_line_size_c; - num_partitions_a = lb_memory_size_a / memory_line_size_a; - - if (scl_data->lb_params.alpha_en - && (num_partitions_a < *num_part_y)) - *num_part_y = num_partitions_a; - - if (*num_part_y > 64) - *num_part_y = 64; - if (*num_part_c > 64) - *num_part_c = 64; - -} - -bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps) -{ - if (ceil_vratio > 2) - return vtaps <= (num_partitions - ceil_vratio + 2); - else - return vtaps <= num_partitions; -} - -/*find first match configuration which meets the min required lb size*/ -static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp, - const struct scaler_data *scl_data) -{ - int num_part_y, num_part_c; - int vtaps = scl_data->taps.v_taps; - int vtaps_c = scl_data->taps.v_taps_c; - int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert); - int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c); - - if (dpp->base.ctx->dc->debug.use_max_lb) { - if (scl_data->format == PIXEL_FORMAT_420BPP8 - || scl_data->format == PIXEL_FORMAT_420BPP10) - return LB_MEMORY_CONFIG_3; - return LB_MEMORY_CONFIG_0; - } - - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c); - - if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) - return LB_MEMORY_CONFIG_1; - - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c); - - if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) - return LB_MEMORY_CONFIG_2; - - if (scl_data->format == PIXEL_FORMAT_420BPP8 - || scl_data->format == PIXEL_FORMAT_420BPP10) { - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c); - - if 
(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) - return LB_MEMORY_CONFIG_3; - } - - dpp->base.caps->dscl_calc_lb_num_partitions( - scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c); - - /*Ensure we can support the requested number of vtaps*/ - ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) - && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)); - - return LB_MEMORY_CONFIG_0; -} - - -static void dpp1_dscl_set_manual_ratio_init( - struct dcn10_dpp *dpp, const struct scaler_data *data) -{ - uint32_t init_frac = 0; - uint32_t init_int = 0; - - REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, - SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5); - - REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, - SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5); - - REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0, - SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5); - - REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0, - SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5); - - /* - * 0.24 format for fraction, first five bits zeroed - */ - init_frac = dc_fixpt_u0d19(data->inits.h) << 5; - init_int = dc_fixpt_floor(data->inits.h); - REG_SET_2(SCL_HORZ_FILTER_INIT, 0, - SCL_H_INIT_FRAC, init_frac, - SCL_H_INIT_INT, init_int); - - init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5; - init_int = dc_fixpt_floor(data->inits.h_c); - REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0, - SCL_H_INIT_FRAC_C, init_frac, - SCL_H_INIT_INT_C, init_int); - - init_frac = dc_fixpt_u0d19(data->inits.v) << 5; - init_int = dc_fixpt_floor(data->inits.v); - REG_SET_2(SCL_VERT_FILTER_INIT, 0, - SCL_V_INIT_FRAC, init_frac, - SCL_V_INIT_INT, init_int); - - if (REG(SCL_VERT_FILTER_INIT_BOT)) { - struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert); - - init_frac = dc_fixpt_u0d19(bot) << 5; - init_int = dc_fixpt_floor(bot); - REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0, - SCL_V_INIT_FRAC_BOT, init_frac, - SCL_V_INIT_INT_BOT, init_int); - } - - init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5; - init_int = dc_fixpt_floor(data->inits.v_c); - REG_SET_2(SCL_VERT_FILTER_INIT_C, 0, - SCL_V_INIT_FRAC_C, init_frac, - SCL_V_INIT_INT_C, init_int); - - if (REG(SCL_VERT_FILTER_INIT_BOT_C)) { - struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); - - init_frac = dc_fixpt_u0d19(bot) << 5; - init_int = dc_fixpt_floor(bot); - REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0, - SCL_V_INIT_FRAC_BOT_C, init_frac, - SCL_V_INIT_INT_BOT_C, init_int); - } -} - -/** - * dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area - * - * @dpp: DPP data struct - * @recout: Rectangle information - * - * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on - * the values specified in the recount parameter. - * - * Note: This function only have effect if AutoCal is disabled. 
- */ -static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, - const struct rect *recout) -{ - REG_SET_2(RECOUT_START, 0, - /* First pixel of RECOUT in the active OTG area */ - RECOUT_START_X, recout->x, - /* First line of RECOUT in the active OTG area */ - RECOUT_START_Y, recout->y); - - REG_SET_2(RECOUT_SIZE, 0, - /* Number of RECOUT horizontal pixels */ - RECOUT_WIDTH, recout->width, - /* Number of RECOUT vertical lines */ - RECOUT_HEIGHT, recout->height); -} - -/** - * dpp1_dscl_set_scaler_manual_scale - Manually program scaler and line buffer - * - * @dpp_base: High level DPP struct - * @scl_data: scalaer_data info - * - * This is the primary function to program scaler and line buffer in manual - * scaling mode. To execute the required operations for manual scale, we need - * to disable AutoCal first. - */ -void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base, - const struct scaler_data *scl_data) -{ - enum lb_memory_config lb_config; - struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode( - dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); - bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN - && scl_data->format <= PIXEL_FORMAT_VIDEO_END; - - if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) - return; - - PERF_TRACE(); - - dpp->scl_data = *scl_data; - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) { - if (dscl_mode != DSCL_MODE_DSCL_BYPASS) - dpp1_power_on_dscl(dpp_base, true); - } - - /* Autocal off */ - REG_SET_3(DSCL_AUTOCAL, 0, - AUTOCAL_MODE, AUTOCAL_MODE_OFF, - AUTOCAL_NUM_PIPE, 0, - AUTOCAL_PIPE_ID, 0); - - /*clean scaler boundary mode when Autocal off*/ - REG_SET(DSCL_CONTROL, 0, - SCL_BOUNDARY_MODE, 0); - - /* Recout */ - dpp1_dscl_set_recout(dpp, &scl_data->recout); - - /* MPC Size */ - REG_SET_2(MPC_SIZE, 0, - /* Number of horizontal pixels of MPC */ - MPC_WIDTH, scl_data->h_active, - /* Number of vertical lines of MPC */ - MPC_HEIGHT, scl_data->v_active); - - /* SCL mode */ - REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); - - if (dscl_mode == DSCL_MODE_DSCL_BYPASS) { - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) - dpp1_power_on_dscl(dpp_base, false); - return; - } - - /* LB */ - lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data); - dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); - - if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) - return; - - /* Black offsets */ - if (REG(SCL_BLACK_OFFSET)) { - if (ycbcr) - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); - else - - REG_SET_2(SCL_BLACK_OFFSET, 0, - SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, - SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); - } - - /* Manually calculate scale ratio and init values */ - dpp1_dscl_set_manual_ratio_init(dpp, scl_data); - - /* HTaps/VTaps */ - REG_SET_4(SCL_TAP_CONTROL, 0, - SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1, - SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1, - SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1, - SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1); - - dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr); - PERF_TRACE(); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile index 3dae3943b056..9b6070c99794 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN. 
-DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \ +DCN20 = dcn20_hubp.o \ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \ dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \ dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c deleted file mode 100644 index 1516c0a48726..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ /dev/null @@ -1,435 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn20_dpp.h" -#include "basics/conversion.h" - -#define NUM_PHASES 64 -#define HORZ_MAX_TAPS 8 -#define VERT_MAX_TAPS 8 - -#define BLACK_OFFSET_RGB_Y 0x0 -#define BLACK_OFFSET_CBCR 0x8000 - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -void dpp20_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); - - // Degamma LUT (RAM) - REG_GET(CM_DGAM_CONTROL, - CM_DGAM_LUT_MODE, &s->dgam_lut_mode); - - // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) - REG_GET(CM_SHAPER_CONTROL, - CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); - REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode, - CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_SIZE, &s->lut3d_size); - - // Blend/Out Gamma (RAM) - REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, - CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode); -} - -void dpp2_power_on_obuf( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0); - - REG_UPDATE(OBUF_MEM_PWR_CTRL, - OBUF_MEM_PWR_FORCE, power_on == true ? 0:1); - - REG_UPDATE(DSCL_MEM_PWR_CTRL, - LUT_MEM_PWR_FORCE, power_on == true ? 
0:1); -} - -void dpp2_dummy_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma) -{} - -static void dpp2_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - uint32_t pixel_format = 0; - uint32_t alpha_en = 1; - enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS; - bool force_disable_cursor = false; - struct out_csc_color_matrix tbl_entry; - uint32_t is_2bit = 0; - int i = 0; - - REG_SET_2(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode); - - //hardcode default - //FORMAT_CONTROL. FORMAT_CNV16 default 0: U0.16/S.1.15; 1: U1.15/ S.1.14 - //FORMAT_CONTROL. CNVC_BYPASS_MSB_ALIGN default 0: disabled 1: enabled - //FORMAT_CONTROL. CLAMP_POSITIVE default 0: disabled 1: enabled - //FORMAT_CONTROL. CLAMP_POSITIVE_C default 0: disabled 1: enabled - REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); - REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - pixel_format = 10; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - pixel_format = 26; /* ARGB16161616_UNORM */ - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: - pixel_format = 12; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: - pixel_format = 112; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: - pixel_format = 113; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: - pixel_format = 114; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: - pixel_format = 115; - color_space = COLOR_SPACE_YCBCR709; - select = DCN2_ICSC_SELECT_ICSC_A; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: - pixel_format = 118; - alpha_en = 0; - break; - case 
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: - pixel_format = 119; - alpha_en = 0; - break; - default: - break; - } - - /* Set default color space based on format if none is given. */ - color_space = input_color_space ? input_color_space : color_space; - - if (is_2bit == 1 && alpha_2bit_lut != NULL) { - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); - } - - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - // if input adjustments exist, program icsc with those values - if (input_csc_color_matrix.enable_adjustment - == true) { - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - - tbl_entry.color_space = input_color_space; - - if (color_space >= COLOR_SPACE_YCBCR601) - select = DCN2_ICSC_SELECT_ICSC_A; - else - select = DCN2_ICSC_SELECT_BYPASS; - - dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry); - } else - dpp2_program_input_csc(dpp_base, color_space, select, NULL); - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - - } - dpp2_power_on_obuf(dpp_base, true); - -} - -/*compute the maximum number of lines that we can fit in the line buffer*/ -void dscl2_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c) -{ - int memory_line_size_y, memory_line_size_c, memory_line_size_a, - lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; - - int line_size = scl_data->viewport.width < scl_data->recout.width ? - scl_data->viewport.width : scl_data->recout.width; - int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 
- scl_data->viewport_c.width : scl_data->recout.width; - - if (line_size == 0) - line_size = 1; - - if (line_size_c == 0) - line_size_c = 1; - - memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ - memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ - memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - - if (lb_config == LB_MEMORY_CONFIG_1) { - lb_memory_size = 970; - lb_memory_size_c = 970; - lb_memory_size_a = 970; - } else if (lb_config == LB_MEMORY_CONFIG_2) { - lb_memory_size = 1290; - lb_memory_size_c = 1290; - lb_memory_size_a = 1290; - } else if (lb_config == LB_MEMORY_CONFIG_3) { - /* 420 mode: using 3rd mem from Y, Cr and Cb */ - lb_memory_size = 970 + 1290 + 484 + 484 + 484; - lb_memory_size_c = 970 + 1290; - lb_memory_size_a = 970 + 1290 + 484; - } else { - lb_memory_size = 970 + 1290 + 484; - lb_memory_size_c = 970 + 1290 + 484; - lb_memory_size_a = 970 + 1290 + 484; - } - *num_part_y = lb_memory_size / memory_line_size_y; - *num_part_c = lb_memory_size_c / memory_line_size_c; - num_partitions_a = lb_memory_size_a / memory_line_size_a; - - if (scl_data->lb_params.alpha_en - && (num_partitions_a < *num_part_y)) - *num_part_y = num_partitions_a; - - if (*num_part_y > 64) - *num_part_y = 64; - if (*num_part_c > 64) - *num_part_c = 64; -} - -void dpp2_cnv_set_alpha_keyer( - struct dpp *dpp_base, - struct cnv_color_keyer_params *color_keyer) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_EN, color_keyer->color_keyer_en); - - REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, color_keyer->color_keyer_mode); - - REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, color_keyer->color_keyer_alpha_low); - REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, color_keyer->color_keyer_alpha_high); - - REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, color_keyer->color_keyer_red_low); - REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, color_keyer->color_keyer_red_high); - - REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, color_keyer->color_keyer_green_low); - REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, color_keyer->color_keyer_green_high); - - REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, color_keyer->color_keyer_blue_low); - REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, color_keyer->color_keyer_blue_high); -} - -void dpp2_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes) -{ - enum dc_cursor_color_format color_format = cursor_attributes->color_format; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - int cur_rom_en = 0; - - if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || - color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { - cur_rom_en = 1; - } - } - - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); - - if (color_format == CURSOR_MODE_MONO) { - /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); - } -} - -void oppn20_dummy_program_regamma_pwl( - struct dpp *dpp, - const struct pwl_params *params, - enum opp_regamma mode) -{} - -static struct dpp_funcs dcn20_dpp_funcs = { - .dpp_read_state = dpp20_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, - .dpp_set_gamut_remap 
= dpp2_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, - .dpp_set_degamma = dpp2_set_degamma, - .dpp_program_input_lut = dpp2_dummy_program_input_lut, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp2_cnv_setup, - .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, - .dpp_program_blnd_lut = dpp20_program_blnd_lut, - .dpp_program_shaper_lut = dpp20_program_shaper, - .dpp_program_3dlut = dpp20_program_3dlut, - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp2_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, -}; - -static struct dpp_caps dcn20_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, -}; - -bool dpp2_construct( - struct dcn20_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn2_dpp_registers *tf_regs, - const struct dcn2_dpp_shift *tf_shift, - const struct dcn2_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn20_dpp_funcs; - dpp->base.caps = &dcn20_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - dpp->lb_pixel_depth_supported = - LB_PIXEL_DEPTH_18BPP | - LB_PIXEL_DEPTH_24BPP | - LB_PIXEL_DEPTH_30BPP | - LB_PIXEL_DEPTH_36BPP; - - dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; - dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ - - return true; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h deleted file mode 100644 index 672cde46c4b9..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ /dev/null @@ -1,781 +0,0 @@ -/* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DCN20_DPP_H__ -#define __DCN20_DPP_H__ - -#include "dcn10/dcn10_dpp.h" - -#define TO_DCN20_DPP(dpp)\ - container_of(dpp, struct dcn20_dpp, base) - -#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ - SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) - -#define TF_REG_LIST_DCN20_COMMON(id) \ - SRI(CM_BLNDGAM_CONTROL, CM, id), \ - SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL2_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL1_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_END_CNTL2_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_0_1, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_2_3, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_4_5, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_6_7, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_8_9, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_10_11, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_12_13, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_14_15, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_16_17, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_18_19, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_20_21, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_22_23, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_24_25, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_26_27, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_28_29, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_30_31, CM, id), \ - SRI(CM_BLNDGAM_RAMB_REGION_32_33, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL2_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL1_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_END_CNTL2_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_0_1, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_2_3, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_4_5, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_6_7, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_8_9, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_10_11, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_12_13, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_14_15, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_16_17, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_18_19, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_20_21, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_22_23, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_24_25, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_26_27, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_28_29, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_30_31, CM, id), \ - SRI(CM_BLNDGAM_RAMA_REGION_32_33, CM, id), \ - SRI(CM_BLNDGAM_LUT_INDEX, CM, id), \ - SRI(CM_BLNDGAM_LUT_DATA, CM, id), \ - SRI(CM_3DLUT_MODE, CM, id), \ - SRI(CM_3DLUT_INDEX, CM, id), \ - SRI(CM_3DLUT_DATA, CM, id), \ - SRI(CM_3DLUT_DATA_30BIT, CM, id), \ - SRI(CM_3DLUT_READ_WRITE_CONTROL, CM, id), \ - SRI(CM_SHAPER_LUT_WRITE_EN_MASK, CM, id), \ - SRI(CM_SHAPER_CONTROL, CM, id), \ - SRI(CM_SHAPER_RAMB_START_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMB_START_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMB_START_CNTL_R, CM, id), \ - 
SRI(CM_SHAPER_RAMB_END_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMB_END_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMB_END_CNTL_R, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_0_1, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_2_3, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_4_5, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_6_7, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_8_9, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_10_11, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_12_13, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_14_15, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_16_17, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_18_19, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_20_21, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_22_23, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_24_25, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_26_27, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_28_29, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_30_31, CM, id), \ - SRI(CM_SHAPER_RAMB_REGION_32_33, CM, id), \ - SRI(CM_SHAPER_RAMA_START_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMA_START_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_SHAPER_RAMA_END_CNTL_B, CM, id), \ - SRI(CM_SHAPER_RAMA_END_CNTL_G, CM, id), \ - SRI(CM_SHAPER_RAMA_END_CNTL_R, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_0_1, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_2_3, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_4_5, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_6_7, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_8_9, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_10_11, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_12_13, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_14_15, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_16_17, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_18_19, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_20_21, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_22_23, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_24_25, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_26_27, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ - SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ - SRI(CM_SHAPER_LUT_INDEX, CM, id) - -#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \ - SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ - SRI(CM_ICSC_B_C11_C12, CM, id), \ - SRI(CM_ICSC_B_C33_C34, CM, id) - -#define TF_REG_LIST_DCN20(id) \ - TF_REG_LIST_DCN(id), \ - TF_REG_LIST_DCN20_COMMON(id), \ - TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id), \ - SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ - SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ - SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ - SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ - SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ - SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ - SRI(CM_SHAPER_LUT_DATA, CM, id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id),\ - SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ - SRI(DSCL_MEM_PWR_CTRL, DSCL, id) - - -#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, 
CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) - - -#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, 
CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, 
CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), 
\ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ - 
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_SIZE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_INDEX, CM_3DLUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA0, mask_sh), \ - TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA1, mask_sh), \ - TF_SF(CM0_CM_3DLUT_DATA_30BIT, CM_3DLUT_DATA_30BIT, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_B, mask_sh), \ - 
TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, 
CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_G, mask_sh), \ - 
TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), 
\ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ - TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, 
CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) - - -#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh), \ - TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ - TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ - TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ - TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ - TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ - TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) - -/* DPP CM debug status register: - * - * Status index including current ICSC, Gamut Remap Mode is 9 - * ICSC Mode: [4..3] - * Gamut Remap Mode: [10..9] - */ -#define CM_TEST_DEBUG_DATA_STATUS_IDX 9 - -#define TF_DEBUG_REG_LIST_SH_DCN20 \ - TF_DEBUG_REG_LIST_SH_DCN10, \ - .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \ - .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9 - -#define TF_DEBUG_REG_LIST_MASK_DCN20 \ - TF_DEBUG_REG_LIST_MASK_DCN10, \ - .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \ - .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600 - -#define TF_REG_FIELD_LIST_DCN2_0(type) \ - TF_REG_FIELD_LIST(type) \ - type CM_BLNDGAM_LUT_DATA; \ - type CM_TEST_DEBUG_DATA_ICSC_MODE; \ - type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \ - type FORMAT_CNV16; \ - type 
CNVC_BYPASS_MSB_ALIGN; \ - type CLAMP_POSITIVE; \ - type CLAMP_POSITIVE_C; \ - type ALPHA_2BIT_LUT0; \ - type ALPHA_2BIT_LUT1; \ - type ALPHA_2BIT_LUT2; \ - type ALPHA_2BIT_LUT3; \ - type FCNV_FP_BIAS_R; \ - type FCNV_FP_BIAS_G; \ - type FCNV_FP_BIAS_B; \ - type FCNV_FP_SCALE_R; \ - type FCNV_FP_SCALE_G; \ - type FCNV_FP_SCALE_B; \ - type COLOR_KEYER_EN; \ - type COLOR_KEYER_MODE; \ - type COLOR_KEYER_ALPHA_LOW; \ - type COLOR_KEYER_ALPHA_HIGH; \ - type COLOR_KEYER_RED_LOW; \ - type COLOR_KEYER_RED_HIGH; \ - type COLOR_KEYER_GREEN_LOW; \ - type COLOR_KEYER_GREEN_HIGH; \ - type COLOR_KEYER_BLUE_LOW; \ - type COLOR_KEYER_BLUE_HIGH; \ - type CUR0_PIX_INV_MODE; \ - type CUR0_PIXEL_ALPHA_MOD_EN; \ - type CUR0_ROM_EN;\ - type OBUF_MEM_PWR_FORCE - - -struct dcn2_dpp_shift { - TF_REG_FIELD_LIST_DCN2_0(uint8_t); -}; - -struct dcn2_dpp_mask { - TF_REG_FIELD_LIST_DCN2_0(uint32_t); -}; - -#define DPP_DCN2_REG_VARIABLE_LIST \ - DPP_COMMON_REG_VARIABLE_LIST \ - uint32_t CM_BLNDGAM_LUT_DATA; \ - uint32_t ALPHA_2BIT_LUT; \ - uint32_t FCNV_FP_BIAS_R; \ - uint32_t FCNV_FP_BIAS_G; \ - uint32_t FCNV_FP_BIAS_B; \ - uint32_t FCNV_FP_SCALE_R; \ - uint32_t FCNV_FP_SCALE_G; \ - uint32_t FCNV_FP_SCALE_B; \ - uint32_t COLOR_KEYER_CONTROL; \ - uint32_t COLOR_KEYER_ALPHA; \ - uint32_t COLOR_KEYER_RED; \ - uint32_t COLOR_KEYER_GREEN; \ - uint32_t COLOR_KEYER_BLUE; \ - uint32_t OBUF_MEM_PWR_CTRL - -#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \ - uint32_t CM_GAMUT_REMAP_B_C11_C12; \ - uint32_t CM_GAMUT_REMAP_B_C13_C14; \ - uint32_t CM_GAMUT_REMAP_B_C21_C22; \ - uint32_t CM_GAMUT_REMAP_B_C23_C24; \ - uint32_t CM_GAMUT_REMAP_B_C31_C32; \ - uint32_t CM_GAMUT_REMAP_B_C33_C34; \ - uint32_t CM_ICSC_B_C11_C12; \ - uint32_t CM_ICSC_B_C33_C34 - -struct dcn2_dpp_registers { - DPP_DCN2_REG_VARIABLE_LIST; - DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND; -}; - -struct dcn20_dpp { - struct dpp base; - - const struct dcn2_dpp_registers *tf_regs; - const struct dcn2_dpp_shift *tf_shift; - const struct dcn2_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -enum dcn20_input_csc_select { - DCN2_ICSC_SELECT_BYPASS = 0, - DCN2_ICSC_SELECT_ICSC_A = 1, - DCN2_ICSC_SELECT_ICSC_B = 2 -}; - -enum dcn20_gamut_remap_select { - DCN2_GAMUT_REMAP_BYPASS = 0, - DCN2_GAMUT_REMAP_COEF_A = 1, - DCN2_GAMUT_REMAP_COEF_B = 2 -}; - -void dpp20_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s); - -void dpp2_set_degamma_pwl( - struct dpp *dpp_base, - const struct pwl_params *params); - -void dpp2_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode); - -void dpp2_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust); - -void dpp2_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn20_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry); - -bool dpp20_program_blnd_lut( - struct dpp *dpp_base, const struct pwl_params *params); - -bool dpp20_program_shaper( - struct dpp *dpp_base, - const struct pwl_params *params); - -bool dpp20_program_3dlut( - struct dpp *dpp_base, - struct tetrahedral_params *params); - -void dpp2_cnv_set_alpha_keyer( - struct dpp *dpp_base, - struct cnv_color_keyer_params *color_keyer); - -void dscl2_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum 
lb_memory_config lb_config, - int *num_part_y, - int *num_part_c); - -void dpp2_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes); - -void dpp2_dummy_program_input_lut( - struct dpp *dpp_base, - const struct dc_gamma *gamma); - -void oppn20_dummy_program_regamma_pwl( - struct dpp *dpp, - const struct pwl_params *params, - enum opp_regamma mode); - -void dpp2_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier); - -bool dpp2_construct(struct dcn20_dpp *dpp2, - struct dc_context *ctx, - uint32_t inst, - const struct dcn2_dpp_registers *tf_regs, - const struct dcn2_dpp_shift *tf_shift, - const struct dcn2_dpp_mask *tf_mask); - -void dpp2_power_on_obuf( - struct dpp *dpp_base, - bool power_on); - -void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust); -#endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c deleted file mode 100644 index 58dc69926e8a..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp_cm.c +++ /dev/null @@ -1,1202 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn20_dpp.h" -#include "basics/conversion.h" - -#include "dcn10/dcn10_cm_common.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define IND_REG(index) \ - (index) - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - - -static void dpp2_enable_cm_block( - struct dpp *dpp_base) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - unsigned int cm_bypass_mode = 0; - //Temp, put CM in bypass mode - if (dpp_base->ctx->dc->debug.cm_in_bypass) - cm_bypass_mode = 1; - - REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); -} - - -static bool dpp2_degamma_ram_inuse( - struct dpp *dpp_base, - bool *ram_a_inuse) -{ - bool ret = false; - uint32_t status_reg = 0; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, - &status_reg); - - if (status_reg == 3) { - *ram_a_inuse = true; - ret = true; - } else if (status_reg == 4) { - *ram_a_inuse = false; - ret = true; - } - return ret; -} - -static void dpp2_program_degamma_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a) -{ - uint32_t i; - - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, - CM_DGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, - is_ram_a == true ? 0:1); - - REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); - for (i = 0 ; i < num; i++) { - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_DGAM_LUT_DATA, 0, - CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); - - } - -} - -void dpp2_set_degamma_pwl( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - bool is_ram_a = true; - - dpp1_power_on_degamma_lut(dpp_base, true); - dpp2_enable_cm_block(dpp_base); - dpp2_degamma_ram_inuse(dpp_base, &is_ram_a); - if (is_ram_a == true) - dpp1_program_degamma_lutb_settings(dpp_base, params); - else - dpp1_program_degamma_luta_settings(dpp_base, params); - - dpp2_program_degamma_lut(dpp_base, params->rgb_resulted, params->hw_points_num, !is_ram_a); - dpp1_degamma_ram_select(dpp_base, !is_ram_a); -} - -void dpp2_set_degamma( - struct dpp *dpp_base, - enum ipp_degamma_mode mode) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - dpp2_enable_cm_block(dpp_base); - - switch (mode) { - case IPP_DEGAMMA_MODE_BYPASS: - /* Setting de gamma bypass for now */ - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); - break; - case IPP_DEGAMMA_MODE_HW_sRGB: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); - break; - case IPP_DEGAMMA_MODE_HW_xvYCC: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); - break; - case IPP_DEGAMMA_MODE_USER_PWL: - REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); - break; - default: - BREAK_TO_DEBUGGER(); - break; - } -} - -static void program_gamut_remap( - struct dcn20_dpp *dpp, - const uint16_t *regval, - enum dcn20_gamut_remap_select select) -{ - uint32_t cur_select = 0; - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) { - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); 
- return; - } - - /* determine which gamut_remap coefficients (A or B) we are using - * currently. select the alternate set to double buffer - * the update so gamut_remap is updated on frame boundary - */ - IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_STATUS_IDX, - CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); - - /* value stored in dbg reg will be 1 greater than mode we want */ - if (cur_select != DCN2_GAMUT_REMAP_COEF_A) - select = DCN2_GAMUT_REMAP_COEF_A; - else - select = DCN2_GAMUT_REMAP_COEF_B; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (select == DCN2_GAMUT_REMAP_COEF_A) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - } else { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET( - CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, select); - -} - -void dpp2_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - int i = 0; - - if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) - /* Bypass if type is bypass or hw */ - program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); - else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - - for (i = 0; i < 12; i++) - arr_matrix[i] = adjust->temperature_matrix[i]; - - convert_float_matrix( - arr_reg_val, arr_matrix, 12); - - program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); - } -} - -static void read_gamut_remap(struct dcn20_dpp *dpp, - uint16_t *regval, - enum dcn20_gamut_remap_select *select) -{ - struct color_matrices_reg gam_regs; - uint32_t selection; - - IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_STATUS_IDX, - CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection); - - *select = selection; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (*select == DCN2_GAMUT_REMAP_COEF_A) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == DCN2_GAMUT_REMAP_COEF_B) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - } -} - -void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - uint16_t arr_reg_val[12]; - enum dcn20_gamut_remap_select select; - - read_gamut_remap(dpp, arr_reg_val, &select); - - if (select == DCN2_GAMUT_REMAP_BYPASS) { - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - return; - } - - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - convert_hw_matrix(adjust->temperature_matrix, - arr_reg_val, ARRAY_SIZE(arr_reg_val)); -} - -void dpp2_program_input_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - 
enum dcn20_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - int i; - int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); - const uint16_t *regval = NULL; - uint32_t cur_select = 0; - enum dcn20_input_csc_select select; - struct color_matrices_reg icsc_regs; - - if (input_select == DCN2_ICSC_SELECT_BYPASS) { - REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); - return; - } - - if (tbl_entry == NULL) { - for (i = 0; i < arr_size; i++) - if (dpp_input_csc_matrix[i].color_space == color_space) { - regval = dpp_input_csc_matrix[i].regval; - break; - } - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - } else { - regval = tbl_entry->regval; - } - - /* determine which CSC coefficients (A or B) we are using - * currently. select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, - CM_TEST_DEBUG_DATA_STATUS_IDX, - CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); - - if (cur_select != DCN2_ICSC_SELECT_ICSC_A) - select = DCN2_ICSC_SELECT_ICSC_A; - else - select = DCN2_ICSC_SELECT_ICSC_B; - - icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; - icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; - icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; - icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; - - if (select == DCN2_ICSC_SELECT_ICSC_A) { - - icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); - icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); - - } else { - - icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); - icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &icsc_regs); - - REG_SET(CM_ICSC_CONTROL, 0, - CM_ICSC_MODE, select); -} - -static void dpp20_power_on_blnd_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_SET(CM_MEM_PWR_CTRL, 0, - BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0:1); - -} - -static void dpp20_configure_blnd_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, - CM_BLNDGAM_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, - CM_BLNDGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); -} - -static void dpp20_program_blnd_pwl( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0 ; i < num; i++) { - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_BLNDGAM_LUT_DATA, 0, - CM_BLNDGAM_LUT_DATA, rgb[i].delta_red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, - CM_BLNDGAM_LUT_DATA, rgb[i].delta_green_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, - CM_BLNDGAM_LUT_DATA, rgb[i].delta_blue_reg); - - } - -} - -static void dcn20_dpp_cm_get_reg_field( - struct dcn20_dpp *dpp, - struct xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; -} - -/*program blnd lut RAM A*/ -static void dpp20_program_blnd_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = 
REG(CM_BLNDGAM_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -/*program blnd lut RAM B*/ -static void dpp20_program_blnd_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - struct xfer_func_reg gam_regs; - - dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); - - cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -static enum dc_lut_mode dpp20_get_blndgam_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -bool dpp20_program_blnd_lut( - struct dpp *dpp_base, const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, 0); - return false; - } - current_mode = dpp20_get_blndgam_current(dpp_base); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp20_power_on_blnd_lut(dpp_base, true); - dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp20_program_blnd_luta_settings(dpp_base, params); - else - dpp20_program_blnd_lutb_settings(dpp_base, params); - - dpp20_program_blnd_pwl( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, - next_mode == LUT_RAM_A ? 
1:2); - - return true; -} - - -static void dpp20_program_shaper_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i, red, green, blue; - uint32_t red_delta, green_delta, blue_delta; - uint32_t red_value, green_value, blue_value; - - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0 ; i < num; i++) { - - red = rgb[i].red_reg; - green = rgb[i].green_reg; - blue = rgb[i].blue_reg; - - red_delta = rgb[i].delta_red_reg; - green_delta = rgb[i].delta_green_reg; - blue_delta = rgb[i].delta_blue_reg; - - red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); - green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); - blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); - - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); - } - -} - -static enum dc_lut_mode dpp20_get_shaper_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static void dpp20_configure_shaper_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); - REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); -} - -/*program shaper RAM A*/ - -static void dpp20_program_shaper_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, - CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - 
REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, - CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, - CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, - CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, - CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, - CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, - CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, - CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, - CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, - CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, - CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, - CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, - CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, - 
CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, - CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, - CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, - CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, - CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); -} - -/*program shaper RAM B*/ -static void dpp20_program_shaper_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, - CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, - CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, - 
CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, - CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, - CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, - CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, - CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, - CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, - CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, - CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, - CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, - CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, - CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, - CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - 
CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, - CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, - CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, - CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); - -} - - -bool dpp20_program_shaper( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); - return false; - } - current_mode = dpp20_get_shaper_current(dpp_base); - - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp20_program_shaper_luta_settings(dpp_base, params); - else - dpp20_program_shaper_lutb_settings(dpp_base, params); - - dpp20_program_shaper_lut( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); - - return true; - -} - -static enum dc_lut_mode get3dlut_config( - struct dpp *dpp_base, - bool *is_17x17x17, - bool *is_12bits_color_channel) -{ - uint32_t i_mode, i_enable_10bits, lut_size; - enum dc_lut_mode mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_CONFIG_STATUS, &i_mode, - CM_3DLUT_30BIT_EN, &i_enable_10bits); - - switch (i_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - if (i_enable_10bits > 0) - *is_12bits_color_channel = false; - else - *is_12bits_color_channel = true; - - REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); - - if (lut_size == 0) - *is_17x17x17 = true; - else - *is_17x17x17 = false; - - return mode; -} -/* - * select ramA or ramB, or bypass - * select color channel size 10 or 12 bits - * select 3dlut size 17x17x17 or 9x9x9 - */ -static void dpp20_set_3dlut_mode( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits, - bool is_lut_size17x17x17) -{ - uint32_t lut_mode; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - if (mode == LUT_BYPASS) - lut_mode = 0; - else if (mode == LUT_RAM_A) - lut_mode = 1; - else - lut_mode = 2; - - REG_UPDATE_2(CM_3DLUT_MODE, - CM_3DLUT_MODE, lut_mode, - CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); -} - -static void dpp20_select_3dlut_ram( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, - CM_3DLUT_30BIT_EN, - is_color_channel_12bits == true ? 0:1); -} - - - -static void dpp20_set3dlut_ram12( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, red1, green1, blue1; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0 ; i < entries; i += 2) { - red = lut[i].red<<4; - green = lut[i].green<<4; - blue = lut[i].blue<<4; - red1 = lut[i+1].red<<4; - green1 = lut[i+1].green<<4; - blue1 = lut[i+1].blue<<4; - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, red, - CM_3DLUT_DATA1, red1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, green, - CM_3DLUT_DATA1, green1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, blue, - CM_3DLUT_DATA1, blue1); - - } -} - -/* - * load selected lut with 10 bits color channels - */ -static void dpp20_set3dlut_ram10( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, value; - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - for (i = 0; i < entries; i++) { - red = lut[i].red; - green = lut[i].green; - blue = lut[i].blue; - - value = (red<<20) | (green<<10) | blue; - - REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); - } - -} - - -static void dpp20_select_3dlut_ram_mask( - struct dpp *dpp_base, - uint32_t ram_selection_mask) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, - ram_selection_mask); - REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); -} - -bool dpp20_program_3dlut( - struct dpp *dpp_base, - struct tetrahedral_params *params) -{ - enum dc_lut_mode mode; - bool is_17x17x17; - bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; - int lut_size0; - int lut_size; - - if (params == NULL) { - dpp20_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); - return false; - } - mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); - - if (mode == LUT_BYPASS || mode == LUT_RAM_B) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - - is_17x17x17 = !params->use_tetrahedral_9; - is_12bits_color_channel = params->use_12bits; - if (is_17x17x17) { - lut0 = params->tetrahedral_17.lut0; - lut1 = params->tetrahedral_17.lut1; - lut2 = params->tetrahedral_17.lut2; - lut3 = params->tetrahedral_17.lut3; - lut_size0 = sizeof(params->tetrahedral_17.lut0)/ - sizeof(params->tetrahedral_17.lut0[0]); - lut_size = sizeof(params->tetrahedral_17.lut1)/ - sizeof(params->tetrahedral_17.lut1[0]); - } else { - lut0 = params->tetrahedral_9.lut0; - lut1 = params->tetrahedral_9.lut1; - lut2 = params->tetrahedral_9.lut2; - lut3 = params->tetrahedral_9.lut3; - lut_size0 = sizeof(params->tetrahedral_9.lut0)/ - sizeof(params->tetrahedral_9.lut0[0]); - lut_size = sizeof(params->tetrahedral_9.lut1)/ - sizeof(params->tetrahedral_9.lut1[0]); - } - - dpp20_select_3dlut_ram(dpp_base, mode, - is_12bits_color_channel); - dpp20_select_3dlut_ram_mask(dpp_base, 0x1); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut0, lut_size0); - else - dpp20_set3dlut_ram10(dpp_base, lut0, lut_size0); - - dpp20_select_3dlut_ram_mask(dpp_base, 0x2); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut1, lut_size); - else 
- dpp20_set3dlut_ram10(dpp_base, lut1, lut_size); - - dpp20_select_3dlut_ram_mask(dpp_base, 0x4); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut2, lut_size); - else - dpp20_set3dlut_ram10(dpp_base, lut2, lut_size); - - dpp20_select_3dlut_ram_mask(dpp_base, 0x8); - if (is_12bits_color_channel) - dpp20_set3dlut_ram12(dpp_base, lut3, lut_size); - else - dpp20_set3dlut_ram10(dpp_base, lut3, lut_size); - - - dpp20_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, - is_17x17x17); - - return true; -} - -void dpp2_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier) -{ - struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - - REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile index 2b0b4f32e13b..3880db59e457 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile @@ -2,7 +2,7 @@ # # Makefile for DCN. DCN201 = dcn201_hubbub.o\ - dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \ + dcn201_mpc.o dcn201_hubp.o dcn201_opp.o \ dcn201_dccg.o dcn201_link_encoder.o AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c deleted file mode 100644 index f809a7d21033..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" - -#include "core_types.h" - -#include "reg_helper.h" -#include "dcn201_dpp.h" -#include "basics/conversion.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -static void dpp201_cnv_setup( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base); - uint32_t pixel_format = 0; - uint32_t alpha_en = 1; - enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; - bool force_disable_cursor = false; - uint32_t is_2bit = 0; - - REG_SET_2(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode); - - REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); - REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - pixel_format = 10; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - pixel_format = 22; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: - pixel_format = 12; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: - pixel_format = 112; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: - pixel_format = 113; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: - pixel_format = 114; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: - pixel_format = 115; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: - pixel_format = 118; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: - pixel_format = 119; - alpha_en = 0; - break; - default: - break; - } - - /* Set default color space based on format if none is given. 
*/ - color_space = input_color_space ? input_color_space : color_space; - - if (is_2bit == 1 && alpha_2bit_lut != NULL) { - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); - } - - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - dpp1_program_input_csc(dpp_base, color_space, select, NULL); - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - } - dpp2_power_on_obuf(dpp_base, true); -} - -#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) - -static bool dpp201_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - if (scl_data->ratios.horz.value == (8ll << 32)) - scl_data->ratios.horz.value--; - if (scl_data->ratios.vert.value == (8ll << 32)) - scl_data->ratios.vert.value--; - if (scl_data->ratios.horz_c.value == (8ll << 32)) - scl_data->ratios.horz_c.value--; - if (scl_data->ratios.vert_c.value == (8ll << 32)) - scl_data->ratios.vert_c.value--; - - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) - scl_data->taps.h_taps = 8; - else - scl_data->taps.h_taps = 4; - } else - scl_data->taps.h_taps = in_taps->h_taps; - - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) - scl_data->taps.v_taps = 8; - else - scl_data->taps.v_taps = 4; - } else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) - scl_data->taps.v_taps_c = 4; - else - scl_data->taps.v_taps_c = 2; - } else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) - scl_data->taps.h_taps_c = 4; - else - scl_data->taps.h_taps_c = 2; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - -static struct dpp_funcs dcn201_dpp_funcs = { - .dpp_read_state = dpp20_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, - .dpp_set_degamma = dpp2_set_degamma, - 
.dpp_program_input_lut = dpp2_dummy_program_input_lut, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp201_cnv_setup, - .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, - .dpp_program_blnd_lut = dpp20_program_blnd_lut, - .dpp_program_shaper_lut = dpp20_program_shaper, - .dpp_program_3dlut = dpp20_program_3dlut, - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp2_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, -}; - -static struct dpp_caps dcn201_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, -}; - -bool dpp201_construct( - struct dcn201_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn201_dpp_registers *tf_regs, - const struct dcn201_dpp_shift *tf_shift, - const struct dcn201_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn201_dpp_funcs; - dpp->base.caps = &dcn201_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - dpp->lb_pixel_depth_supported = - LB_PIXEL_DEPTH_18BPP | - LB_PIXEL_DEPTH_24BPP | - LB_PIXEL_DEPTH_30BPP; - - dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; - dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; - - return true; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h deleted file mode 100644 index cbd5b47b4acf..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h +++ /dev/null @@ -1,83 +0,0 @@ -/* Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DCN201_DPP_H__ -#define __DCN201_DPP_H__ - -#include "dcn20/dcn20_dpp.h" - -#define TO_DCN201_DPP(dpp)\ - container_of(dpp, struct dcn201_dpp, base) - -#define TF_REG_LIST_DCN201(id) \ - TF_REG_LIST_DCN20(id) - -#define TF_REG_LIST_SH_MASK_DCN201(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN20(mask_sh) - -#define TF_REG_FIELD_LIST_DCN201(type) \ - TF_REG_FIELD_LIST_DCN2_0(type) - -struct dcn201_dpp_shift { - TF_REG_FIELD_LIST_DCN201(uint8_t); -}; - -struct dcn201_dpp_mask { - TF_REG_FIELD_LIST_DCN201(uint32_t); -}; - -#define DPP_DCN201_REG_VARIABLE_LIST \ - DPP_DCN2_REG_VARIABLE_LIST - -struct dcn201_dpp_registers { - DPP_DCN201_REG_VARIABLE_LIST; -}; - -struct dcn201_dpp { - struct dpp base; - - const struct dcn201_dpp_registers *tf_regs; - const struct dcn201_dpp_shift *tf_shift; - const struct dcn201_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -bool dpp201_construct(struct dcn201_dpp *dpp2, - struct dc_context *ctx, - uint32_t inst, - const struct dcn201_dpp_registers *tf_regs, - const struct dcn201_dpp_shift *tf_shift, - const struct dcn201_dpp_mask *tf_mask); - -#endif /* __DC_HWSS_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index b5b2aa3b3783..c6ca70f3c061 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -25,13 +25,11 @@ DCN30 := dcn30_hubbub.o \ dcn30_hubp.o \ - dcn30_dpp.o \ dcn30_dccg.o \ dcn30_mpc.o dcn30_vpg.o \ dcn30_afmt.o \ dcn30_dio_stream_encoder.o \ dcn30_dwb.o \ - dcn30_dpp_cm.o \ dcn30_dwb_cm.o \ dcn30_cm_common.o \ dcn30_mmhubbub.o \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c index ddb344056d40..b8327237ed44 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c @@ -26,7 +26,7 @@ #include "dm_services.h" #include "core_types.h" #include "reg_helper.h" -#include "dcn30_dpp.h" +#include "dcn30/dcn30_dpp.h" #include "basics/conversion.h" #include "dcn30_cm_common.h" #include "custom_float.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c deleted file mode 100644 index a3a769aad042..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ /dev/null @@ -1,1527 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "core_types.h" -#include "reg_helper.h" -#include "dcn30_dpp.h" -#include "basics/conversion.h" -#include "dcn30_cm_common.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - - -void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t gamcor_lut_mode, rgam_lut_mode; - - REG_GET(DPP_CONTROL, - DPP_CLOCK_ENABLE, &s->is_enabled); - - // Pre-degamma (ROM) - REG_GET_2(PRE_DEGAM, - PRE_DEGAM_MODE, &s->pre_dgam_mode, - PRE_DEGAM_SELECT, &s->pre_dgam_select); - - // Gamma Correction (RAM) - REG_GET(CM_GAMCOR_CONTROL, - CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode); - if (s->gamcor_mode) { - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode); - if (!gamcor_lut_mode) - s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B - } - - // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) - REG_GET(CM_SHAPER_CONTROL, - CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_MODE_CURRENT, &s->lut3d_mode); - REG_GET(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_SIZE, &s->lut3d_size); - - // Blend/Out Gamma (RAM) - REG_GET(CM_BLNDGAM_CONTROL, - CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode); - if (s->rgam_lut_mode){ - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode); - if (!rgam_lut_mode) - s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B - } -} - -/*program post scaler scs block in dpp CM*/ -void dpp3_program_post_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int i; - int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); - const uint16_t *regval = NULL; - uint32_t cur_select = 0; - enum dcn10_input_csc_select select; - struct color_matrices_reg gam_regs; - - if (input_select == INPUT_CSC_SELECT_BYPASS) { - REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, 0); - return; - } - - if (tbl_entry == NULL) { - for (i = 0; i < arr_size; i++) - if (dpp_input_csc_matrix[i].color_space == color_space) { - regval = dpp_input_csc_matrix[i].regval; - break; - } - - if (regval == NULL) { - BREAK_TO_DEBUGGER(); - return; - } - } else { - regval = tbl_entry->regval; - } - - /* determine which CSC matrix (icsc or coma) we are using - * currently. 
select the alternate set to double buffer - * the CSC update so CSC is updated on frame boundary - */ - REG_GET(CM_POST_CSC_CONTROL, - CM_POST_CSC_MODE_CURRENT, &cur_select); - - if (cur_select != INPUT_CSC_SELECT_ICSC) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_COMA; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_POST_CSC_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_POST_CSC_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_POST_CSC_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_POST_CSC_C12; - - if (select == INPUT_CSC_SELECT_ICSC) { - - gam_regs.csc_c11_c12 = REG(CM_POST_CSC_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_POST_CSC_C33_C34); - - } else { - - gam_regs.csc_c11_c12 = REG(CM_POST_CSC_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_POST_CSC_B_C33_C34); - - } - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - REG_SET(CM_POST_CSC_CONTROL, 0, - CM_POST_CSC_MODE, select); -} - - -/*CNVC degam unit has read only LUTs*/ -void dpp3_set_pre_degam(struct dpp *dpp_base, enum dc_transfer_func_predefined tr) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int pre_degam_en = 1; - int degamma_lut_selection = 0; - - switch (tr) { - case TRANSFER_FUNCTION_LINEAR: - case TRANSFER_FUNCTION_UNITY: - pre_degam_en = 0; //bypass - break; - case TRANSFER_FUNCTION_SRGB: - degamma_lut_selection = 0; - break; - case TRANSFER_FUNCTION_BT709: - degamma_lut_selection = 4; - break; - case TRANSFER_FUNCTION_PQ: - degamma_lut_selection = 5; - break; - case TRANSFER_FUNCTION_HLG: - degamma_lut_selection = 6; - break; - case TRANSFER_FUNCTION_GAMMA22: - degamma_lut_selection = 1; - break; - case TRANSFER_FUNCTION_GAMMA24: - degamma_lut_selection = 2; - break; - case TRANSFER_FUNCTION_GAMMA26: - degamma_lut_selection = 3; - break; - default: - pre_degam_en = 0; - break; - } - - REG_SET_2(PRE_DEGAM, 0, - PRE_DEGAM_MODE, pre_degam_en, - PRE_DEGAM_SELECT, degamma_lut_selection); -} - -void dpp3_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t pixel_format = 0; - uint32_t alpha_en = 1; - enum dc_color_space color_space = COLOR_SPACE_SRGB; - enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; - bool force_disable_cursor = false; - uint32_t is_2bit = 0; - uint32_t alpha_plane_enable = 0; - uint32_t dealpha_en = 0, dealpha_ablnd_en = 0; - uint32_t realpha_en = 0, realpha_ablnd_en = 0; - uint32_t program_prealpha_dealpha = 0; - struct out_csc_color_matrix tbl_entry; - int i; - - REG_SET_2(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_EXPANSION_MODE, mode); - - REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); - REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); - REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); - - REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2); - - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - pixel_format = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - pixel_format = 3; - alpha_en = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - pixel_format = 8; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - 
pixel_format = 10; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - force_disable_cursor = false; - pixel_format = 65; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - force_disable_cursor = true; - pixel_format = 64; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - force_disable_cursor = true; - pixel_format = 67; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - force_disable_cursor = true; - pixel_format = 66; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - pixel_format = 26; /* ARGB16161616_UNORM */ - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - pixel_format = 24; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - pixel_format = 25; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: - pixel_format = 12; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: - pixel_format = 112; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: - pixel_format = 113; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: - pixel_format = 114; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: - pixel_format = 115; - color_space = COLOR_SPACE_YCBCR709; - select = INPUT_CSC_SELECT_ICSC; - is_2bit = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGBE: - pixel_format = 116; - alpha_plane_enable = 0; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: - pixel_format = 116; - alpha_plane_enable = 1; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: - pixel_format = 118; - break; - case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: - pixel_format = 119; - break; - default: - break; - } - - /* Set default color space based on format if none is given. */ - color_space = input_color_space ? input_color_space : color_space; - - if (is_2bit == 1 && alpha_2bit_lut != NULL) { - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); - REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); - } - - REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, pixel_format, - CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable); - REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); - - if (program_prealpha_dealpha) { - dealpha_en = 1; - realpha_en = 1; - } - REG_SET_2(PRE_DEALPHA, 0, - PRE_DEALPHA_EN, dealpha_en, - PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en); - REG_SET_2(PRE_REALPHA, 0, - PRE_REALPHA_EN, realpha_en, - PRE_REALPHA_ABLND_EN, realpha_ablnd_en); - - /* If input adjustment exists, program the ICSC with those values. 
*/ - if (input_csc_color_matrix.enable_adjustment == true) { - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; - - tbl_entry.color_space = input_color_space; - - if (color_space >= COLOR_SPACE_YCBCR601) - select = INPUT_CSC_SELECT_ICSC; - else - select = INPUT_CSC_SELECT_BYPASS; - - dpp3_program_post_csc(dpp_base, color_space, select, - &tbl_entry); - } else { - dpp3_program_post_csc(dpp_base, color_space, select, NULL); - } - - if (force_disable_cursor) { - REG_UPDATE(CURSOR_CONTROL, - CURSOR_ENABLE, 0); - REG_UPDATE(CURSOR0_CONTROL, - CUR0_ENABLE, 0); - } -} - -#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) - -void dpp3_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes) -{ - enum dc_cursor_color_format color_format = cursor_attributes->color_format; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int cur_rom_en = 0; - - if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || - color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { - cur_rom_en = 1; - } - } - - REG_UPDATE_3(CURSOR0_CONTROL, - CUR0_MODE, color_format, - CUR0_EXPANSION_MODE, 0, - CUR0_ROM_EN, cur_rom_en); - - if (color_format == CURSOR_MODE_MONO) { - /* todo: clarify what to program these to */ - REG_UPDATE(CURSOR0_COLOR0, - CUR0_COLOR0, 0x00000000); - REG_UPDATE(CURSOR0_COLOR1, - CUR0_COLOR1, 0xFFFFFFFF); - } - - dpp_base->att.cur0_ctl.bits.expansion_mode = 0; - dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en; - dpp_base->att.cur0_ctl.bits.mode = color_format; -} - - -bool dpp3_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps) -{ - int num_part_y, num_part_c; - int max_taps_y, max_taps_c; - int min_taps_y, min_taps_c; - enum lb_memory_config lb_config; - - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - - /* - * Set default taps if none are provided - * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling - * taps = 4 for upscaling - */ - if (in_taps->h_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz) > 1) - scl_data->taps.h_taps = min(2 * dc_fixpt_ceil(scl_data->ratios.horz), 8); - else - scl_data->taps.h_taps = 4; - } else - scl_data->taps.h_taps = in_taps->h_taps; - if (in_taps->v_taps == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert) > 1) - scl_data->taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert, 2)), 8); - else - scl_data->taps.v_taps = 4; - } else - scl_data->taps.v_taps = in_taps->v_taps; - if (in_taps->v_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 1) - scl_data->taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert_c, 2)), 8); - else - scl_data->taps.v_taps_c = 4; - } else - scl_data->taps.v_taps_c = in_taps->v_taps_c; - if (in_taps->h_taps_c == 0) { - if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 1) - scl_data->taps.h_taps_c = min(2 * dc_fixpt_ceil(scl_data->ratios.horz_c), 8); - else - scl_data->taps.h_taps_c = 4; - } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) - /* Only 1 and even h_taps_c are supported by hw */ - scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; - else - scl_data->taps.h_taps_c = in_taps->h_taps_c; - - /*Ensure we can support the requested number of vtaps*/ - min_taps_y = 
dc_fixpt_ceil(scl_data->ratios.vert); - min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c); - - /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */ - if ((scl_data->format == PIXEL_FORMAT_420BPP8) || (scl_data->format == PIXEL_FORMAT_420BPP10)) - lb_config = LB_MEMORY_CONFIG_3; - else - lb_config = LB_MEMORY_CONFIG_0; - - dpp->caps->dscl_calc_lb_num_partitions( - scl_data, lb_config, &num_part_y, &num_part_c); - - /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ - if (dc_fixpt_ceil(scl_data->ratios.vert) > 2) - max_taps_y = num_part_y - (dc_fixpt_ceil(scl_data->ratios.vert) - 2); - else - max_taps_y = num_part_y; - - if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 2) - max_taps_c = num_part_c - (dc_fixpt_ceil(scl_data->ratios.vert_c) - 2); - else - max_taps_c = num_part_c; - - if (max_taps_y < min_taps_y) - return false; - else if (max_taps_c < min_taps_c) - return false; - - if (scl_data->taps.v_taps > max_taps_y) - scl_data->taps.v_taps = max_taps_y; - - if (scl_data->taps.v_taps_c > max_taps_c) - scl_data->taps.v_taps_c = max_taps_c; - - if (!dpp->ctx->dc->debug.always_scale) { - if (IDENTITY_RATIO(scl_data->ratios.horz)) - scl_data->taps.h_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert)) - scl_data->taps.v_taps = 1; - if (IDENTITY_RATIO(scl_data->ratios.horz_c)) - scl_data->taps.h_taps_c = 1; - if (IDENTITY_RATIO(scl_data->ratios.vert_c)) - scl_data->taps.v_taps_c = 1; - } - - return true; -} - -static void dpp3_deferred_update(struct dpp *dpp_base) -{ - int bypass_state; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->deferred_reg_writes.bits.disable_dscl) { - REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); - dpp_base->deferred_reg_writes.bits.disable_dscl = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_gamcor) { - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_gamcor = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) { - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_3dlut) { - REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_3dlut = false; - } - - if (dpp_base->deferred_reg_writes.bits.disable_shaper) { - REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state); - if (bypass_state == 0) { // only program if bypass was latched - REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3); - } else - ASSERT(0); // LUT select was updated again before vupdate - dpp_base->deferred_reg_writes.bits.disable_shaper = false; - } -} - -static void dpp3_power_on_blnd_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 
0); - REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; - } - } else { - REG_SET(CM_MEM_PWR_CTRL, 0, - BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); - } -} - -static void dpp3_power_on_hdr3dlut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0); - REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_3dlut = true; - } - } -} - -static void dpp3_power_on_shaper( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0); - REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_shaper = true; - } - } -} - -static void dpp3_configure_blnd_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE_2(CM_BLNDGAM_LUT_CONTROL, - CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 7, - CM_BLNDGAM_LUT_HOST_SEL, is_ram_a == true ? 0 : 1); - - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); -} - -static void dpp3_program_blnd_pwl( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; - uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; - uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; - - if (is_rgb_equal(rgb, num)) { - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); - } else { - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); - REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); - - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); - REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); - - REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); - REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); - for (i = 0 ; i < num; i++) - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); - REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_blue); - } -} - -static void dcn3_dpp_cm_get_reg_field( - struct dcn3_dpp *dpp, - struct dcn3_xfer_func_reg *reg) -{ - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; -} - -/*program blnd lut RAM A*/ -static void dpp3_program_blnd_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - struct dcn3_xfer_func_reg gam_regs; - - dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); - - cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -/*program blnd lut RAM B*/ -static void dpp3_program_blnd_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - struct dcn3_xfer_func_reg gam_regs; - - dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); - - gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = 
REG(CM_BLNDGAM_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); - - cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); -} - -static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t mode_current = 0; - uint32_t in_use = 0; - - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &mode_current); - REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &in_use); - - switch (mode_current) { - case 0: - case 1: - mode = LUT_BYPASS; - break; - - case 2: - if (in_use == 0) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static bool dpp3_program_blnd_lut(struct dpp *dpp_base, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_blnd_lut(dpp_base, false); - return false; - } - - current_mode = dpp3_get_blndgam_current(dpp_base); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B) - next_mode = LUT_RAM_A; - else - next_mode = LUT_RAM_B; - - dpp3_power_on_blnd_lut(dpp_base, true); - dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp3_program_blnd_luta_settings(dpp_base, params); - else - dpp3_program_blnd_lutb_settings(dpp_base, params); - - dpp3_program_blnd_pwl( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_UPDATE_2(CM_BLNDGAM_CONTROL, - CM_BLNDGAM_MODE, 2, - CM_BLNDGAM_SELECT, next_mode == LUT_RAM_A ? 
0 : 1); - - return true; -} - - -static void dpp3_program_shaper_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num) -{ - uint32_t i, red, green, blue; - uint32_t red_delta, green_delta, blue_delta; - uint32_t red_value, green_value, blue_value; - - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - for (i = 0 ; i < num; i++) { - - red = rgb[i].red_reg; - green = rgb[i].green_reg; - blue = rgb[i].blue_reg; - - red_delta = rgb[i].delta_red_reg; - green_delta = rgb[i].delta_green_reg; - blue_delta = rgb[i].delta_blue_reg; - - red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); - green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); - blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); - - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); - REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); - } - -} - -static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode; - uint32_t state_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &state_mode); - - switch (state_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - - return mode; -} - -static void dpp3_configure_shaper_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_EN_MASK, 7); - REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, - CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); - REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); -} - -/*program shaper RAM A*/ - -static void dpp3_program_shaper_luta_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, - CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, - CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, - CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, - CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, 
- CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, - CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, - CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, - CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, - CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, - CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, - CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, - CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, - CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, - CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, - CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, - CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - 
CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, - CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, - CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, - CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, - CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); -} - -/*program shaper RAM B*/ -static void dpp3_program_shaper_lutb_settings( - struct dpp *dpp_base, - const struct pwl_params *params) -{ - const struct gamma_curve *curve; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); - REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, - CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, - CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); - - REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, - CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, - CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); - - curve = params->arr_curve_points; - REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, - CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, - CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, - CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, - 
CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, - CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, - CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, - CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, - CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, - CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, - CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, - CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, - CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, - CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, - CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, - CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, - 
CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, - CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, - CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); - - curve += 2; - REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, - CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, - CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, - CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, - CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); - -} - - -static bool dpp3_program_shaper(struct dpp *dpp_base, - const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (params == NULL) { - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_shaper(dpp_base, false); - return false; - } - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_shaper(dpp_base, true); - - current_mode = dpp3_get_shaper_current(dpp_base); - - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_A) - dpp3_program_shaper_luta_settings(dpp_base, params); - else - dpp3_program_shaper_lutb_settings(dpp_base, params); - - dpp3_program_shaper_lut( - dpp_base, params->rgb_resulted, params->hw_points_num); - - REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); - - return true; - -} - -static enum dc_lut_mode get3dlut_config( - struct dpp *dpp_base, - bool *is_17x17x17, - bool *is_12bits_color_channel) -{ - uint32_t i_mode, i_enable_10bits, lut_size; - enum dc_lut_mode mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_30BIT_EN, &i_enable_10bits); - REG_GET(CM_3DLUT_MODE, - CM_3DLUT_MODE_CURRENT, &i_mode); - - switch (i_mode) { - case 0: - mode = LUT_BYPASS; - break; - case 1: - mode = LUT_RAM_A; - break; - case 2: - mode = LUT_RAM_B; - break; - default: - mode = LUT_BYPASS; - break; - } - if (i_enable_10bits > 0) - *is_12bits_color_channel = false; - else - *is_12bits_color_channel = true; - - REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); - - if (lut_size == 0) - *is_17x17x17 = true; - else - *is_17x17x17 = false; - - return mode; -} -/* - * select ramA or ramB, or bypass - * select color channel size 10 or 12 bits - * select 3dlut size 17x17x17 or 9x9x9 - */ -static void dpp3_set_3dlut_mode( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits, - bool is_lut_size17x17x17) -{ - uint32_t lut_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (mode == LUT_BYPASS) - lut_mode = 0; - else if (mode == LUT_RAM_A) - lut_mode = 1; - else - lut_mode = 2; - - REG_UPDATE_2(CM_3DLUT_MODE, - CM_3DLUT_MODE, lut_mode, - CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); -} - -static void dpp3_select_3dlut_ram( - struct dpp *dpp_base, - enum dc_lut_mode mode, - bool is_color_channel_12bits) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, - CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, - CM_3DLUT_30BIT_EN, - is_color_channel_12bits == true ? 0:1); -} - - - -static void dpp3_set3dlut_ram12( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, red1, green1, blue1; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - for (i = 0 ; i < entries; i += 2) { - red = lut[i].red<<4; - green = lut[i].green<<4; - blue = lut[i].blue<<4; - red1 = lut[i+1].red<<4; - green1 = lut[i+1].green<<4; - blue1 = lut[i+1].blue<<4; - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, red, - CM_3DLUT_DATA1, red1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, green, - CM_3DLUT_DATA1, green1); - - REG_SET_2(CM_3DLUT_DATA, 0, - CM_3DLUT_DATA0, blue, - CM_3DLUT_DATA1, blue1); - - } -} - -/* - * load selected lut with 10 bits color channels - */ -static void dpp3_set3dlut_ram10( - struct dpp *dpp_base, - const struct dc_rgb *lut, - uint32_t entries) -{ - uint32_t i, red, green, blue, value; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - for (i = 0; i < entries; i++) { - red = lut[i].red; - green = lut[i].green; - blue = lut[i].blue; - - value = (red<<20) | (green<<10) | blue; - - REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); - } - -} - - -static void dpp3_select_3dlut_ram_mask( - struct dpp *dpp_base, - uint32_t ram_selection_mask) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, - ram_selection_mask); - REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); -} - -static bool dpp3_program_3dlut(struct dpp *dpp_base, - struct tetrahedral_params *params) -{ - enum dc_lut_mode mode; - bool is_17x17x17; - bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; - int lut_size0; - int lut_size; - - if (params == NULL) { - dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_hdr3dlut(dpp_base, false); - return false; - } - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_hdr3dlut(dpp_base, true); - - mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); - - if (mode == LUT_BYPASS || mode == LUT_RAM_B) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - - is_17x17x17 = !params->use_tetrahedral_9; - is_12bits_color_channel = params->use_12bits; - if (is_17x17x17) { - lut0 = params->tetrahedral_17.lut0; - lut1 = params->tetrahedral_17.lut1; - lut2 = params->tetrahedral_17.lut2; - lut3 = params->tetrahedral_17.lut3; - lut_size0 = sizeof(params->tetrahedral_17.lut0)/ - sizeof(params->tetrahedral_17.lut0[0]); - lut_size = sizeof(params->tetrahedral_17.lut1)/ - sizeof(params->tetrahedral_17.lut1[0]); - } else { - lut0 = params->tetrahedral_9.lut0; - lut1 = params->tetrahedral_9.lut1; - lut2 = params->tetrahedral_9.lut2; - lut3 = params->tetrahedral_9.lut3; - lut_size0 = sizeof(params->tetrahedral_9.lut0)/ - sizeof(params->tetrahedral_9.lut0[0]); - lut_size = sizeof(params->tetrahedral_9.lut1)/ - sizeof(params->tetrahedral_9.lut1[0]); - } - - dpp3_select_3dlut_ram(dpp_base, mode, - is_12bits_color_channel); - dpp3_select_3dlut_ram_mask(dpp_base, 0x1); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut0, 
lut_size0); - else - dpp3_set3dlut_ram10(dpp_base, lut0, lut_size0); - - dpp3_select_3dlut_ram_mask(dpp_base, 0x2); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut1, lut_size); - else - dpp3_set3dlut_ram10(dpp_base, lut1, lut_size); - - dpp3_select_3dlut_ram_mask(dpp_base, 0x4); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut2, lut_size); - else - dpp3_set3dlut_ram10(dpp_base, lut2, lut_size); - - dpp3_select_3dlut_ram_mask(dpp_base, 0x8); - if (is_12bits_color_channel) - dpp3_set3dlut_ram12(dpp_base, lut3, lut_size); - else - dpp3_set3dlut_ram10(dpp_base, lut3, lut_size); - - - dpp3_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, - is_17x17x17); - - return true; -} -static struct dpp_funcs dcn30_dpp_funcs = { - .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, - .dpp_read_state = dpp30_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = NULL, - .dpp_set_pre_degam = dpp3_set_pre_degam, - .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp3_cnv_setup, - .dpp_program_degamma_pwl = NULL, - .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, - .dpp_program_cm_bias = dpp3_program_cm_bias, - .dpp_program_blnd_lut = dpp3_program_blnd_lut, - .dpp_program_shaper_lut = dpp3_program_shaper, - .dpp_program_3dlut = dpp3_program_3dlut, - .dpp_deferred_update = dpp3_deferred_update, - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp3_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, -}; - - -static struct dpp_caps dcn30_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, -}; - -bool dpp3_construct( - struct dcn3_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn30_dpp_funcs; - dpp->base.caps = &dcn30_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - return true; -} - diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h deleted file mode 100644 index 2ac8045a87a1..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.h +++ /dev/null @@ -1,642 +0,0 @@ -/* Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DCN30_DPP_H__ -#define __DCN30_DPP_H__ - -#include "dcn20/dcn20_dpp.h" - -#define TO_DCN30_DPP(dpp)\ - container_of(dpp, struct dcn3_dpp, base) - -#define DPP_REG_LIST_DCN30_COMMON(id)\ - SRI(CM_DEALPHA, CM, id),\ - SRI(CM_MEM_PWR_STATUS, CM, id),\ - SRI(CM_BIAS_CR_R, CM, id),\ - SRI(CM_BIAS_Y_G_CB_B, CM, id),\ - SRI(PRE_DEGAM, CNVC_CFG, id),\ - SRI(CM_GAMCOR_CONTROL, CM, id),\ - SRI(CM_GAMCOR_LUT_CONTROL, CM, id),\ - SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ - SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ - SRI(CM_GAMCOR_LUT_DATA, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_REGION_0_1, CM, id),\ - SRI(CM_GAMCOR_RAMB_REGION_32_33, CM, id),\ - SRI(CM_GAMCOR_RAMB_OFFSET_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_OFFSET_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_OFFSET_R, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_REGION_0_1, CM, id),\ - SRI(CM_GAMCOR_RAMA_REGION_32_33, CM, id),\ - SRI(CM_GAMCOR_RAMA_OFFSET_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_OFFSET_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_OFFSET_R, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id),\ - SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id),\ - SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ - 
SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ - SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ - SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ - SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ - SRI(OTG_H_BLANK, DSCL, id), \ - SRI(OTG_V_BLANK, DSCL, id), \ - SRI(SCL_MODE, DSCL, id), \ - SRI(LB_DATA_FORMAT, DSCL, id), \ - SRI(LB_MEMORY_CTRL, DSCL, id), \ - SRI(DSCL_AUTOCAL, DSCL, id), \ - SRI(DSCL_CONTROL, DSCL, id), \ - SRI(SCL_TAP_CONTROL, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ - SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ - SRI(DSCL_2TAP_CONTROL, DSCL, id), \ - SRI(MPC_SIZE, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ - SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ - SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ - SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ - SRI(RECOUT_START, DSCL, id), \ - SRI(RECOUT_SIZE, DSCL, id), \ - SRI(PRE_DEALPHA, CNVC_CFG, id), \ - SRI(PRE_REALPHA, CNVC_CFG, id), \ - SRI(PRE_CSC_MODE, CNVC_CFG, id), \ - SRI(PRE_CSC_C11_C12, CNVC_CFG, id), \ - SRI(PRE_CSC_C33_C34, CNVC_CFG, id), \ - SRI(PRE_CSC_B_C11_C12, CNVC_CFG, id), \ - SRI(PRE_CSC_B_C33_C34, CNVC_CFG, id), \ - SRI(CM_POST_CSC_CONTROL, CM, id), \ - SRI(CM_POST_CSC_C11_C12, CM, id), \ - SRI(CM_POST_CSC_C33_C34, CM, id), \ - SRI(CM_POST_CSC_B_C11_C12, CM, id), \ - SRI(CM_POST_CSC_B_C33_C34, CM, id), \ - SRI(CM_MEM_PWR_CTRL, CM, id), \ - SRI(CM_CONTROL, CM, id), \ - SRI(FORMAT_CONTROL, CNVC_CFG, id), \ - SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ - SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ - SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ - SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ - SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ - SRI(DPP_CONTROL, DPP_TOP, id), \ - SRI(CM_HDR_MULT_COEF, CM, id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id), \ - SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ - SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ - SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ - SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ - SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ - SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ - SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ - SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ - SRI(CURSOR_CONTROL, CURSOR0_, id),\ - SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ - SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ - SRI(DSCL_MEM_PWR_CTRL, DSCL, id) - -#define DPP_REG_LIST_DCN30(id)\ - DPP_REG_LIST_DCN30_COMMON(id), \ - TF_REG_LIST_DCN20_COMMON(id), \ - SRI(CM_BLNDGAM_CONTROL, CM, id), \ - SRI(CM_SHAPER_LUT_DATA, CM, id),\ - SRI(CM_MEM_PWR_CTRL2, CM, id), \ - SRI(CM_MEM_PWR_STATUS2, CM, id), \ - SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM, id),\ - SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM, id),\ - SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\ - SRI(CM_BLNDGAM_LUT_CONTROL, 
CM, id) - - - -#define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\ - TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\ - TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\ - TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_ABLND, mask_sh),\ - TF_SF(CM0_CM_BIAS_CR_R, CM_BIAS_CR_R, mask_sh),\ - TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_Y_G, mask_sh),\ - TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_CB_B, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, mask_sh),\ - TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_MODE, mask_sh),\ - TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_SELECT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_PWL_DISABLE, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_INDEX, CM_GAMCOR_LUT_INDEX, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_DATA, CM_GAMCOR_LUT_DATA, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_WRITE_COLOR_MASK, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_COLOR_SEL, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_HOST_SEL, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_CONFIG_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL1_B, CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_OFFSET_B, CM_GAMCOR_RAMA_OFFSET_B, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ - TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ - TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ - 
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ - TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ - TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ - TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ - TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ - TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ - TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ - TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ - TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ - TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ - TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ - TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ - TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ - TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ - TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ - TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_EN, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_ABLND_EN, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_EN, mask_sh), \ - 
TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_ABLND_EN, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE_CURRENT, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C11, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C12, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C33, mask_sh), \ - TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C34, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C11, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \ - TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ - TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ - TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_ALPHA_PLANE_ENABLE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ - TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh), \ - TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ - TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_R, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_G, mask_sh), \ - TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_B, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ - TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ - TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, 
COLOR_KEYER_GREEN_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ - TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ - TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ - TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ - TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh),\ - TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh) - -#define DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)\ - TF_SF(CM0_CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, mask_sh), \ - TF_SF(CM0_CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, mask_sh),\ - TF_SF(CM0_CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, mask_sh),\ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_HOST_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_CONFIG_MODE, mask_sh), \ - TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, mask_sh), \ - TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, mask_sh) - - -#define DPP_REG_LIST_SH_MASK_DCN30(mask_sh)\ - DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ - TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ - DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh) - -#define DPP_REG_FIELD_LIST_DCN3(type) \ - TF_REG_FIELD_LIST_DCN2_0(type); \ - type FORMAT_CROSSBAR_R; \ - type FORMAT_CROSSBAR_G; \ - type FORMAT_CROSSBAR_B; \ - type CM_DEALPHA_EN;\ - type CM_DEALPHA_ABLND;\ - type CM_BIAS_Y_G;\ - type CM_BIAS_CB_B;\ - type CM_BIAS_CR_R;\ - type 
GAMCOR_MEM_PWR_DIS; \ - type GAMCOR_MEM_PWR_FORCE; \ - type HDR3DLUT_MEM_PWR_FORCE; \ - type SHAPER_MEM_PWR_FORCE; \ - type PRE_DEGAM_MODE;\ - type PRE_DEGAM_SELECT;\ - type CNVC_ALPHA_PLANE_ENABLE; \ - type PRE_DEALPHA_EN; \ - type PRE_DEALPHA_ABLND_EN; \ - type PRE_REALPHA_EN; \ - type PRE_REALPHA_ABLND_EN; \ - type PRE_CSC_MODE; \ - type PRE_CSC_MODE_CURRENT; \ - type PRE_CSC_C11; \ - type PRE_CSC_C12; \ - type PRE_CSC_C33; \ - type PRE_CSC_C34; \ - type CM_POST_CSC_MODE; \ - type CM_POST_CSC_MODE_CURRENT; \ - type CM_POST_CSC_C11; \ - type CM_POST_CSC_C12; \ - type CM_POST_CSC_C33; \ - type CM_POST_CSC_C34; \ - type CM_GAMCOR_MODE; \ - type CM_GAMCOR_SELECT; \ - type CM_GAMCOR_PWL_DISABLE; \ - type CM_GAMCOR_MODE_CURRENT; \ - type CM_GAMCOR_SELECT_CURRENT; \ - type CM_GAMCOR_LUT_INDEX; \ - type CM_GAMCOR_LUT_DATA; \ - type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \ - type CM_GAMCOR_LUT_READ_COLOR_SEL; \ - type CM_GAMCOR_LUT_HOST_SEL; \ - type CM_GAMCOR_LUT_CONFIG_MODE; \ - type CM_GAMCOR_LUT_STATUS; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_END_B; \ - type CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; \ - type CM_GAMCOR_RAMA_OFFSET_B; \ - type CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; \ - type CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; \ - type CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; \ - type CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;\ - type CM_GAMUT_REMAP_MODE_CURRENT;\ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G; \ - type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G; \ - type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R; \ - type CM_BLNDGAM_LUT_WRITE_COLOR_MASK; \ - type CM_BLNDGAM_LUT_HOST_SEL; \ - type CM_BLNDGAM_LUT_CONFIG_MODE; \ - type CM_3DLUT_MODE_CURRENT; \ - type CM_SHAPER_MODE_CURRENT; \ - type CM_BLNDGAM_MODE; \ - type CM_BLNDGAM_MODE_CURRENT; \ - type CM_BLNDGAM_SELECT_CURRENT; \ - type CM_BLNDGAM_SELECT; \ - type GAMCOR_MEM_PWR_STATE; \ - type BLNDGAM_MEM_PWR_STATE; \ - type HDR3DLUT_MEM_PWR_STATE; \ - type SHAPER_MEM_PWR_STATE - -struct dcn3_dpp_shift { - DPP_REG_FIELD_LIST_DCN3(uint8_t); -}; - -struct dcn3_dpp_mask { - DPP_REG_FIELD_LIST_DCN3(uint32_t); -}; - -#define DPP_DCN3_REG_VARIABLE_LIST_COMMON \ - DPP_DCN2_REG_VARIABLE_LIST; \ - uint32_t CM_MEM_PWR_STATUS;\ - uint32_t CM_MEM_PWR_STATUS2;\ - uint32_t CM_MEM_PWR_CTRL2;\ - uint32_t CM_DEALPHA;\ - uint32_t CM_BIAS_CR_R;\ - uint32_t CM_BIAS_Y_G_CB_B;\ - uint32_t PRE_DEGAM;\ - uint32_t PRE_DEALPHA; \ - uint32_t PRE_REALPHA; \ - uint32_t PRE_CSC_MODE; \ - uint32_t PRE_CSC_C11_C12; \ - uint32_t PRE_CSC_C33_C34; \ - uint32_t PRE_CSC_B_C11_C12; \ - uint32_t PRE_CSC_B_C33_C34; \ - uint32_t CM_POST_CSC_CONTROL; \ - uint32_t CM_POST_CSC_C11_C12; \ - uint32_t CM_POST_CSC_C33_C34; \ - uint32_t CM_POST_CSC_B_C11_C12; \ - uint32_t CM_POST_CSC_B_C33_C34; \ - uint32_t CM_GAMUT_REMAP_B_C11_C12; \ - uint32_t CM_GAMUT_REMAP_B_C13_C14; \ - uint32_t CM_GAMUT_REMAP_B_C21_C22; \ - uint32_t CM_GAMUT_REMAP_B_C23_C24; \ - uint32_t CM_GAMUT_REMAP_B_C31_C32; \ - uint32_t CM_GAMUT_REMAP_B_C33_C34; \ - uint32_t CM_GAMCOR_CONTROL; \ - uint32_t CM_GAMCOR_LUT_CONTROL; \ - uint32_t CM_GAMCOR_LUT_INDEX; \ - uint32_t CM_GAMCOR_LUT_DATA; \ - uint32_t CM_GAMCOR_RAMB_START_CNTL_B; \ - uint32_t 
CM_GAMCOR_RAMB_START_CNTL_G; \ - uint32_t CM_GAMCOR_RAMB_START_CNTL_R; \ - uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_R; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL1_B; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL2_B; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL1_G; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL2_G; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL1_R; \ - uint32_t CM_GAMCOR_RAMB_END_CNTL2_R; \ - uint32_t CM_GAMCOR_RAMB_REGION_0_1; \ - uint32_t CM_GAMCOR_RAMB_REGION_32_33; \ - uint32_t CM_GAMCOR_RAMB_OFFSET_B; \ - uint32_t CM_GAMCOR_RAMB_OFFSET_G; \ - uint32_t CM_GAMCOR_RAMB_OFFSET_R; \ - uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_R; \ - uint32_t CM_GAMCOR_RAMA_START_CNTL_B; \ - uint32_t CM_GAMCOR_RAMA_START_CNTL_G; \ - uint32_t CM_GAMCOR_RAMA_START_CNTL_R; \ - uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_R; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL1_B; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL2_B; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL1_G; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL2_G; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL1_R; \ - uint32_t CM_GAMCOR_RAMA_END_CNTL2_R; \ - uint32_t CM_GAMCOR_RAMA_REGION_0_1; \ - uint32_t CM_GAMCOR_RAMA_REGION_32_33; \ - uint32_t CM_GAMCOR_RAMA_OFFSET_B; \ - uint32_t CM_GAMCOR_RAMA_OFFSET_G; \ - uint32_t CM_GAMCOR_RAMA_OFFSET_R; \ - uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_B; \ - uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_G; \ - uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B; \ - uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G; \ - uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R; \ - uint32_t CM_BLNDGAM_LUT_CONTROL - - -struct dcn3_dpp_registers { - DPP_DCN3_REG_VARIABLE_LIST_COMMON; -}; - - -struct dcn3_dpp { - struct dpp base; - - const struct dcn3_dpp_registers *tf_regs; - const struct dcn3_dpp_shift *tf_shift; - const struct dcn3_dpp_mask *tf_mask; - - const uint16_t *filter_v; - const uint16_t *filter_h; - const uint16_t *filter_v_c; - const uint16_t *filter_h_c; - int lb_pixel_depth_supported; - int lb_memory_size; - int lb_bits_per_entry; - bool is_write_to_ram_a_safe; - struct scaler_data scl_data; - struct pwl_params pwl_data; -}; - -bool dpp3_construct(struct dcn3_dpp *dpp3, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask); - -bool dpp3_program_gamcor_lut( - struct dpp *dpp_base, const struct pwl_params *params); - -void dpp3_program_CM_dealpha( - struct dpp *dpp_base, - uint32_t enable, uint32_t additive_blending); - -void dpp30_read_state(struct dpp *dpp_base, - struct dcn_dpp_state *s); - -bool dpp3_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - -void dpp3_cnv_setup ( - struct dpp *dpp_base, - enum surface_pixel_format format, - enum expansion_mode mode, - struct dc_csc_transform input_csc_color_matrix, - enum dc_color_space input_color_space, - struct cnv_alpha_2bit_lut *alpha_2bit_lut); - -void dpp3_program_CM_bias( - struct dpp *dpp_base, - struct CM_bias_params *bias_params); - -void dpp3_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t 
multiplier); - -void dpp3_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust); - -void dpp3_set_pre_degam(struct dpp *dpp_base, - enum dc_transfer_func_predefined tr); - -void dpp3_set_cursor_attributes( - struct dpp *dpp_base, - struct dc_cursor_attributes *cursor_attributes); - -void dpp3_program_post_csc( - struct dpp *dpp_base, - enum dc_color_space color_space, - enum dcn10_input_csc_select input_select, - const struct out_csc_color_matrix *tbl_entry); - -void dpp3_program_cm_bias( - struct dpp *dpp_base, - struct CM_bias_params *bias_params); - -void dpp3_program_cm_dealpha( - struct dpp *dpp_base, - uint32_t enable, uint32_t additive_blending); - -void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust); -#endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c deleted file mode 100644 index 2f5b3fbd3507..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c +++ /dev/null @@ -1,461 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "core_types.h" -#include "reg_helper.h" -#include "dcn30_dpp.h" -#include "basics/conversion.h" -#include "dcn30_cm_common.h" - -#define REG(reg)\ - dpp->tf_regs->reg - -#define CTX \ - dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - dpp->tf_shift->field_name, dpp->tf_mask->field_name - -static void dpp3_enable_cm_block( - struct dpp *dpp_base) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - unsigned int cm_bypass_mode = 0; - - // debug option: put CM in bypass mode - if (dpp_base->ctx->dc->debug.cm_in_bypass) - cm_bypass_mode = 1; - - REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); -} - -static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) -{ - enum dc_lut_mode mode = LUT_BYPASS; - uint32_t state_mode; - uint32_t lut_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); - - if (state_mode == 2) {//Programmable RAM LUT - REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); - if (lut_mode == 0) - mode = LUT_RAM_A; - else - mode = LUT_RAM_B; - } - - return mode; -} - -static void dpp3_program_gammcor_lut( - struct dpp *dpp_base, - const struct pwl_result_data *rgb, - uint32_t num, - bool is_ram_a) -{ - uint32_t i; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; - uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; - uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; - - /*fill in the LUT with all base values to be used by pwl module - * HW auto increments the LUT index: back-to-back write - */ - if (is_rgb_equal(rgb, num)) { - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); - - } else { - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4); - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); - - REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); - - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2); - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green); - - REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); - - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1); - for (i = 0 ; i < num; i++) - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg); - - REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue); - } -} - -static void dpp3_power_on_gamcor_lut( - struct dpp *dpp_base, - bool power_on) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { - if (power_on) { - REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0); - REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5); - } else { - dpp_base->ctx->dc->optimized_required = true; - dpp_base->deferred_reg_writes.bits.disable_gamcor = true; - } - } else - REG_SET(CM_MEM_PWR_CTRL, 0, - GAMCOR_MEM_PWR_DIS, power_on == true ? 
0:1); -} - -void dpp3_program_cm_dealpha( - struct dpp *dpp_base, - uint32_t enable, uint32_t additive_blending) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET_2(CM_DEALPHA, 0, - CM_DEALPHA_EN, enable, - CM_DEALPHA_ABLND, additive_blending); -} - -void dpp3_program_cm_bias( - struct dpp *dpp_base, - struct CM_bias_params *bias_params) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r); - REG_SET_2(CM_BIAS_Y_G_CB_B, 0, - CM_BIAS_Y_G, bias_params->cm_bias_y_g, - CM_BIAS_CB_B, bias_params->cm_bias_cb_b); -} - -static void dpp3_gamcor_reg_field( - struct dcn3_dpp *dpp, - struct dcn3_xfer_func_reg *reg) -{ - - reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; - reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; - reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B; - reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B; - - reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; - reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; - reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; - reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; - reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; - reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; - reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; - - reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B; - reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B; - reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; - reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; - reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; - reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; - reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; - reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; - reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B; - reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B; - reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; - reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; -} - -static void dpp3_configure_gamcor_lut( - struct dpp *dpp_base, - bool is_ram_a) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7); - REG_UPDATE(CM_GAMCOR_LUT_CONTROL, - CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 
0:1); - REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); -} - - -bool dpp3_program_gamcor_lut( - struct dpp *dpp_base, const struct pwl_params *params) -{ - enum dc_lut_mode current_mode; - enum dc_lut_mode next_mode; - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - struct dcn3_xfer_func_reg gam_regs; - - dpp3_enable_cm_block(dpp_base); - - if (params == NULL) { //bypass if we have no pwl data - REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0); - if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) - dpp3_power_on_gamcor_lut(dpp_base, false); - return false; - } - dpp3_power_on_gamcor_lut(dpp_base, true); - REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2); - - current_mode = dpp30_get_gamcor_current(dpp_base); - if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) - next_mode = LUT_RAM_B; - else - next_mode = LUT_RAM_A; - - dpp3_power_on_gamcor_lut(dpp_base, true); - dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A); - - if (next_mode == LUT_RAM_B) { - gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R); - gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1); - gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33); - //New registers in DCN3AG/DCN GAMCOR block - gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B); - gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G); - gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R); - gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B); - gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G); - gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R); - } else { - gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B); - gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G); - gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R); - gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B); - gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G); - gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R); - gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B); - gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B); - gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G); - gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G); - gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R); - gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R); - gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1); - gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33); - //New registers in DCN3AG/DCN GAMCOR block - gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B); - gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G); - gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R); - gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B); - gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G); 
- gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R); - } - - //get register fields - dpp3_gamcor_reg_field(dpp, &gam_regs); - - //program register set for LUTA/LUTB - cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs); - - dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num, - next_mode == LUT_RAM_A); - - //select Gamma LUT to use for next frame - REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1); - - return true; -} - -void dpp3_set_hdr_multiplier( - struct dpp *dpp_base, - uint32_t multiplier) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - - REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); -} - - -static void program_gamut_remap( - struct dcn3_dpp *dpp, - const uint16_t *regval, - int select) -{ - uint16_t selection = 0; - struct color_matrices_reg gam_regs; - - if (regval == NULL || select == GAMUT_REMAP_BYPASS) { - REG_SET(CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, 0); - return; - } - switch (select) { - case GAMUT_REMAP_COEFF: - selection = 1; - break; - /*this corresponds to GAMUT_REMAP coefficients set B - *we don't have common coefficient sets in dcn3ag/dcn3 - */ - case GAMUT_REMAP_COMA_COEFF: - selection = 2; - break; - default: - break; - } - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - - if (select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } else if (select == GAMUT_REMAP_COMA_COEFF) { - - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - - } - //select coefficient set to use - REG_SET( - CM_GAMUT_REMAP_CONTROL, 0, - CM_GAMUT_REMAP_MODE, selection); -} - -void dpp3_cm_set_gamut_remap( - struct dpp *dpp_base, - const struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - int i = 0; - int gamut_mode; - - if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) - /* Bypass if type is bypass or hw */ - program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); - else { - struct fixed31_32 arr_matrix[12]; - uint16_t arr_reg_val[12]; - - for (i = 0; i < 12; i++) - arr_matrix[i] = adjust->temperature_matrix[i]; - - convert_float_matrix( - arr_reg_val, arr_matrix, 12); - - //current coefficient set in use - REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode); - - if (gamut_mode == 0) - gamut_mode = 1; //use coefficient set A - else if (gamut_mode == 1) - gamut_mode = 2; - else - gamut_mode = 1; - - //follow dcn2 approach for now - using only coefficient set A - program_gamut_remap(dpp, arr_reg_val, gamut_mode); - } -} - -static void read_gamut_remap(struct dcn3_dpp *dpp, - uint16_t *regval, - int *select) -{ - struct color_matrices_reg gam_regs; - uint32_t selection; - - //current coefficient set in use - REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection); - - *select = selection; - - gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; - gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; - gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; - 
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; - - if (*select == GAMUT_REMAP_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - - } else if (*select == GAMUT_REMAP_COMA_COEFF) { - gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); - gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); - - cm_helper_read_color_matrices(dpp->base.ctx, - regval, - &gam_regs); - } -} - -void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, - struct dpp_grph_csc_adjustment *adjust) -{ - struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - uint16_t arr_reg_val[12]; - int select; - - read_gamut_remap(dpp, arr_reg_val, &select); - - if (select == GAMUT_REMAP_BYPASS) { - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - return; - } - - adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - convert_hw_matrix(adjust->temperature_matrix, - arr_reg_val, ARRAY_SIZE(arr_reg_val)); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile index 5314770fff1c..a58c37165f5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile @@ -11,7 +11,7 @@ # Makefile for dcn32. DCN32 = dcn32_hubbub.o dcn32_dccg.o \ - dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \ + dcn32_mmhubbub.o dcn32_hubp.o dcn32_mpc.o \ dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \ dcn32_hpo_dp_link_encoder.o diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c deleted file mode 100644 index 681e75c6dbaf..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.c +++ /dev/null @@ -1,165 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "core_types.h" -#include "reg_helper.h" -#include "dcn32_dpp.h" -#include "basics/conversion.h" -#include "dcn30/dcn30_cm_common.h" - -/* Compute the maximum number of lines that we can fit in the line buffer */ -static void dscl32_calc_lb_num_partitions( - const struct scaler_data *scl_data, - enum lb_memory_config lb_config, - int *num_part_y, - int *num_part_c) -{ - int memory_line_size_y, memory_line_size_c, memory_line_size_a, - lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; - - int line_size = scl_data->viewport.width < scl_data->recout.width ? - scl_data->viewport.width : scl_data->recout.width; - int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? - scl_data->viewport_c.width : scl_data->recout.width; - - if (line_size == 0) - line_size = 1; - - if (line_size_c == 0) - line_size_c = 1; - - memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ - memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ - memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ - - if (lb_config == LB_MEMORY_CONFIG_1) { - lb_memory_size = 970; - lb_memory_size_c = 970; - lb_memory_size_a = 970; - } else if (lb_config == LB_MEMORY_CONFIG_2) { - lb_memory_size = 1290; - lb_memory_size_c = 1290; - lb_memory_size_a = 1290; - } else if (lb_config == LB_MEMORY_CONFIG_3) { - if (scl_data->viewport.width == scl_data->h_active && - scl_data->viewport.height == scl_data->v_active) { - /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ - /* use increased LB size for calculation only if Scaler not enabled */ - lb_memory_size = 970 + 1290 + 1170 + 1170 + 1170; - lb_memory_size_c = 970 + 1290; - lb_memory_size_a = 970 + 1290 + 1170; - } else { - /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ - lb_memory_size = 970 + 1290 + 484 + 484 + 484; - lb_memory_size_c = 970 + 1290; - lb_memory_size_a = 970 + 1290 + 484; - } - } else { - if (scl_data->viewport.width == scl_data->h_active && - scl_data->viewport.height == scl_data->v_active) { - /* use increased LB size for calculation only if Scaler not enabled */ - lb_memory_size = 970 + 1290 + 1170; - lb_memory_size_c = 970 + 1290 + 1170; - lb_memory_size_a = 970 + 1290 + 1170; - } else { - lb_memory_size = 970 + 1290 + 484; - lb_memory_size_c = 970 + 1290 + 484; - lb_memory_size_a = 970 + 1290 + 484; - } - } - *num_part_y = lb_memory_size / memory_line_size_y; - *num_part_c = lb_memory_size_c / memory_line_size_c; - num_partitions_a = lb_memory_size_a / memory_line_size_a; - - if (scl_data->lb_params.alpha_en - && (num_partitions_a < *num_part_y)) - *num_part_y = num_partitions_a; - - if (*num_part_y > 32) - *num_part_y = 32; - if (*num_part_c > 32) - *num_part_c = 32; -} - -static struct dpp_funcs dcn32_dpp_funcs = { - .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, - .dpp_read_state = dpp30_read_state, - .dpp_reset = dpp_reset, - .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, - .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, - .dpp_set_csc_adjustment = NULL, - .dpp_set_csc_default = NULL, - .dpp_program_regamma_pwl = NULL, - .dpp_set_pre_degam = dpp3_set_pre_degam, - .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp1_full_bypass, - .dpp_setup = dpp3_cnv_setup, - .dpp_program_degamma_pwl = NULL, - .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, - .dpp_program_cm_bias = dpp3_program_cm_bias, - - .dpp_program_blnd_lut = 
NULL, // BLNDGAM is removed completely in DCN3.2 DPP - .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) - .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) - - .dpp_program_bias_and_scale = NULL, - .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, - .set_cursor_attributes = dpp3_set_cursor_attributes, - .set_cursor_position = dpp1_set_cursor_position, - .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, - .dpp_dppclk_control = dpp1_dppclk_control, - .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, - .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, -}; - - -static struct dpp_caps dcn32_dpp_cap = { - .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, - .max_lb_partitions = 31, - .dscl_calc_lb_num_partitions = dscl32_calc_lb_num_partitions, -}; - -bool dpp32_construct( - struct dcn3_dpp *dpp, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask) -{ - dpp->base.ctx = ctx; - - dpp->base.inst = inst; - dpp->base.funcs = &dcn32_dpp_funcs; - dpp->base.caps = &dcn32_dpp_cap; - - dpp->tf_regs = tf_regs; - dpp->tf_shift = tf_shift; - dpp->tf_mask = tf_mask; - - return true; -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h deleted file mode 100644 index 572958d287eb..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dpp.h +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef __DCN32_DPP_H__ -#define __DCN32_DPP_H__ - -#include "dcn20/dcn20_dpp.h" -#include "dcn30/dcn30_dpp.h" - -bool dpp32_construct(struct dcn3_dpp *dpp3, - struct dc_context *ctx, - uint32_t inst, - const struct dcn3_dpp_registers *tf_regs, - const struct dcn3_dpp_shift *tf_shift, - const struct dcn3_dpp_mask *tf_mask); - -#endif /* __DCN32_DPP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile index 0e317e0c36a0..d5b4533d2f62 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile @@ -13,7 +13,7 @@ DCN35 = dcn35_dio_stream_encoder.o \ dcn35_dio_link_encoder.o dcn35_dccg.o \ dcn35_hubp.o dcn35_hubbub.o \ - dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o + dcn35_mmhubbub.o dcn35_opp.o dcn35_pg_cntl.o dcn35_dwb.o AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c deleted file mode 100644 index 3341ef71009b..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.c +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#include "core_types.h" -#include "dcn35_dpp.h" -#include "reg_helper.h" - -#define REG(reg) dpp->tf_regs->reg - -#define CTX dpp->base.ctx - -#undef FN -#define FN(reg_name, field_name) \ - ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \ - ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name - -bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx, - uint32_t inst, const struct dcn3_dpp_registers *tf_regs, - const struct dcn35_dpp_shift *tf_shift, - const struct dcn35_dpp_mask *tf_mask) -{ - return dpp32_construct(dpp, ctx, inst, tf_regs, - (const struct dcn3_dpp_shift *)(tf_shift), - (const struct dcn3_dpp_mask *)(tf_mask)); -} - -void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable) -{ - REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable); -} diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h deleted file mode 100644 index 09b84307cd9e..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dpp.h +++ /dev/null @@ -1,57 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright 2023 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#ifndef __DCN35_DPP_H__ -#define __DCN35_DPP_H__ - -#include "dcn32/dcn32_dpp.h" - -#define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \ - DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh) - -#define DPP_REG_FIELD_LIST_DCN35(type) \ - struct { \ - DPP_REG_FIELD_LIST_DCN3(type); \ - type DPP_FGCG_REP_DIS; \ - } - -struct dcn35_dpp_shift { - DPP_REG_FIELD_LIST_DCN35(uint8_t); -}; - -struct dcn35_dpp_mask { - DPP_REG_FIELD_LIST_DCN35(uint32_t); -}; - -bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx, - uint32_t inst, const struct dcn3_dpp_registers *tf_regs, - const struct dcn35_dpp_shift *tf_shift, - const struct dcn35_dpp_mask *tf_mask); - -void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable); - -#endif // __DCN35_DPP_H diff --git a/drivers/gpu/drm/amd/display/dc/dpp/Makefile b/drivers/gpu/drm/amd/display/dc/dpp/Makefile new file mode 100644 index 000000000000..99bd36073561 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/Makefile @@ -0,0 +1,77 @@ + +# Copyright 2022 Advanced Micro Devices, Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'dpp' sub-component of DAL. +# +ifdef CONFIG_DRM_AMD_DC_FP +############################################################################### +# DCN +############################################################################### + +DPP_DCN10 = dcn10_dpp.o dcn10_dpp_dscl.o dcn10_dpp_cm.o + +AMD_DAL_DPP_DCN10 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn10/,$(DPP_DCN10)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN10) + +############################################################################### + +DPP_DCN20 = dcn20_dpp.o dcn20_dpp_cm.o + +AMD_DAL_DPP_DCN20 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn20/,$(DPP_DCN20)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN20) + +############################################################################### + +DPP_DCN201 = dcn201_dpp.o + +AMD_DAL_DPP_DCN201 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn201/,$(DPP_DCN201)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN201) + +############################################################################### + +DPP_DCN30 = dcn30_dpp.o dcn30_dpp_cm.o + +AMD_DAL_DPP_DCN30 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn30/,$(DPP_DCN30)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN30) + +############################################################################### + +DPP_DCN32 = dcn32_dpp.o + +AMD_DAL_DPP_DCN32 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn32/,$(DPP_DCN32)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN32) + +############################################################################### + +DPP_DCN35 = dcn35_dpp.o + +AMD_DAL_DPP_DCN35 = $(addprefix $(AMDDALPATH)/dc/dpp/dcn35/,$(DPP_DCN35)) + +AMD_DISPLAY_FILES += $(AMD_DAL_DPP_DCN35) + +############################################################################### + +endif \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt new file mode 100644 index 000000000000..1318c6fba3e7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/CMakeLists.txt @@ -0,0 +1,6 @@ +dal3_subdirectory_sources( + dcn10_dpp.c + dcn10_dpp_cm.c + dcn10_dpp_dscl.c + dcn10_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c new file mode 100644 index 000000000000..e1da48b05d00 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c @@ -0,0 +1,585 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn10/dcn10_dpp.h" +#include "basics/conversion.h" + +#define NUM_PHASES 64 +#define HORZ_MAX_TAPS 8 +#define VERT_MAX_TAPS 8 + +#define BLACK_OFFSET_RGB_Y 0x0 +#define BLACK_OFFSET_CBCR 0x8000 + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +enum pixel_format_description { + PIXEL_FORMAT_FIXED = 0, + PIXEL_FORMAT_FIXED16, + PIXEL_FORMAT_FLOAT + +}; + +enum dcn10_coef_filter_type_sel { + SCL_COEF_LUMA_VERT_FILTER = 0, + SCL_COEF_LUMA_HORZ_FILTER = 1, + SCL_COEF_CHROMA_VERT_FILTER = 2, + SCL_COEF_CHROMA_HORZ_FILTER = 3, + SCL_COEF_ALPHA_VERT_FILTER = 4, + SCL_COEF_ALPHA_HORZ_FILTER = 5 +}; + +enum dscl_autocal_mode { + AUTOCAL_MODE_OFF = 0, + + /* Autocal calculate the scaling ratio and initial phase and the + * DSCL_MODE_SEL must be set to 1 + */ + AUTOCAL_MODE_AUTOSCALE = 1, + /* Autocal perform auto centering without replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOCENTER = 2, + /* Autocal perform auto centering and auto replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOREPLICATE = 3 +}; + +enum dscl_mode_sel { + DSCL_MODE_SCALING_444_BYPASS = 0, + DSCL_MODE_SCALING_444_RGB_ENABLE = 1, + DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, + DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, + DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, + DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, + DSCL_MODE_DSCL_BYPASS = 6 +}; + +void dpp_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_GET(DPP_CONTROL, + DPP_CLOCK_ENABLE, &s->is_enabled); + REG_GET(CM_IGAM_CONTROL, + CM_IGAM_LUT_MODE, &s->igam_lut_mode); + REG_GET(CM_IGAM_CONTROL, + CM_IGAM_INPUT_FORMAT, &s->igam_input_format); + REG_GET(CM_DGAM_CONTROL, + CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + REG_GET(CM_RGAM_CONTROL, + CM_RGAM_LUT_MODE, &s->rgam_lut_mode); + REG_GET(CM_GAMUT_REMAP_CONTROL, + CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); + + if (s->gamut_remap_mode) { + s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); + s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); + s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); + s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); + 
s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); + s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); + } +} + +#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) + +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps) +{ + /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ + if (scl_data->format == PIXEL_FORMAT_FP16 && + dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->ratios.horz.value != dc_fixpt_one.value && + scl_data->ratios.vert.value != dc_fixpt_one.value) + return false; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + + /* TODO: add lb check */ + + /* No support for programming ratio of 4, drop to 3.99999.. */ + if (scl_data->ratios.horz.value == (4ll << 32)) + scl_data->ratios.horz.value--; + if (scl_data->ratios.vert.value == (4ll << 32)) + scl_data->ratios.vert.value--; + if (scl_data->ratios.horz_c.value == (4ll << 32)) + scl_data->ratios.horz_c.value--; + if (scl_data->ratios.vert_c.value == (4ll << 32)) + scl_data->ratios.vert_c.value--; + + /* Set default taps if none are provided */ + if (in_taps->h_taps == 0) + scl_data->taps.h_taps = 4; + else + scl_data->taps.h_taps = in_taps->h_taps; + if (in_taps->v_taps == 0) + scl_data->taps.v_taps = 4; + else + scl_data->taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) + scl_data->taps.v_taps_c = 2; + else + scl_data->taps.v_taps_c = in_taps->v_taps_c; + if (in_taps->h_taps_c == 0) + scl_data->taps.h_taps_c = 2; + /* Only 1 and even h_taps_c are supported by hw */ + else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; + else + scl_data->taps.h_taps_c = in_taps->h_taps_c; + + if (!dpp->ctx->dc->debug.always_scale) { + if (IDENTITY_RATIO(scl_data->ratios.horz)) + scl_data->taps.h_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert)) + scl_data->taps.v_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; + } + + return true; +} + +void dpp_reset(struct dpp *dpp_base) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + dpp->filter_h_c = NULL; + dpp->filter_v_c = NULL; + dpp->filter_h = NULL; + dpp->filter_v = NULL; + + memset(&dpp->scl_data, 0, sizeof(dpp->scl_data)); + memset(&dpp->pwl_data, 0, sizeof(dpp->pwl_data)); +} + + + +static void dpp1_cm_set_regamma_pwl( + struct dpp *dpp_base, const struct pwl_params *params, enum opp_regamma mode) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + uint32_t re_mode = 0; + + switch (mode) { + case OPP_REGAMMA_BYPASS: + re_mode = 0; + break; + case OPP_REGAMMA_SRGB: + re_mode = 1; + break; + case OPP_REGAMMA_XVYCC: + re_mode = 2; + break; + case OPP_REGAMMA_USER: + re_mode = dpp->is_write_to_ram_a_safe ? 
4 : 3; + if (memcmp(&dpp->pwl_data, params, sizeof(*params)) == 0) + break; + + dpp1_cm_power_on_regamma_lut(dpp_base, true); + dpp1_cm_configure_regamma_lut(dpp_base, dpp->is_write_to_ram_a_safe); + + if (dpp->is_write_to_ram_a_safe) + dpp1_cm_program_regamma_luta_settings(dpp_base, params); + else + dpp1_cm_program_regamma_lutb_settings(dpp_base, params); + + dpp1_cm_program_regamma_lut(dpp_base, params->rgb_resulted, + params->hw_points_num); + dpp->pwl_data = *params; + + re_mode = dpp->is_write_to_ram_a_safe ? 3 : 4; + dpp->is_write_to_ram_a_safe = !dpp->is_write_to_ram_a_safe; + break; + default: + break; + } + REG_SET(CM_RGAM_CONTROL, 0, CM_RGAM_LUT_MODE, re_mode); +} + +static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\ + enum pixel_format_description *fmt) +{ + + if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F || + input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) + *fmt = PIXEL_FORMAT_FLOAT; + else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 || + input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616) + *fmt = PIXEL_FORMAT_FIXED16; + else + *fmt = PIXEL_FORMAT_FIXED; +} + +static void dpp1_set_degamma_format_float( + struct dpp *dpp_base, + bool is_float) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (is_float) { + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 3); + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 1); + } else { + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 2); + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, 0); + } +} + +void dpp1_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + uint32_t pixel_format; + uint32_t alpha_en; + enum pixel_format_description fmt ; + enum dc_color_space color_space; + enum dcn10_input_csc_select select; + bool is_float; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + bool force_disable_cursor = false; + struct out_csc_color_matrix tbl_entry; + int i = 0; + + dpp1_setup_format_flags(format, &fmt); + alpha_en = 1; + pixel_format = 0; + color_space = COLOR_SPACE_SRGB; + select = INPUT_CSC_SELECT_BYPASS; + is_float = false; + + switch (fmt) { + case PIXEL_FORMAT_FIXED: + case PIXEL_FORMAT_FIXED16: + /*when output is float then FORMAT_CONTROL__OUTPUT_FP=1*/ + REG_SET_3(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode, + OUTPUT_FP, 0); + break; + case PIXEL_FORMAT_FLOAT: + REG_SET_3(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode, + OUTPUT_FP, 1); + is_float = true; + break; + default: + + break; + } + + dpp1_set_degamma_format_float(dpp_base, is_float); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + pixel_format = 10; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case 
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + pixel_format = 26; /* ARGB16161616_UNORM */ + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + default: + break; + } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + // if input adjustments exist, program icsc with those values + + if (input_csc_color_matrix.enable_adjustment + == true) { + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; + + tbl_entry.color_space = color_space; + + if (color_space >= COLOR_SPACE_YCBCR601) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_BYPASS; + + dpp1_program_input_csc(dpp_base, color_space, select, &tbl_entry); + } else + dpp1_program_input_csc(dpp_base, color_space, select, NULL); + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + } +} + +void dpp1_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes) +{ + enum dc_cursor_color_format color_format = cursor_attributes->color_format; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE_2(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0); + + if (color_format == CURSOR_MODE_MONO) { + /* todo: clarify what to program these to */ + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } +} + + +void dpp1_set_cursor_position( + struct dpp *dpp_base, + const struct dc_cursor_position *pos, + const struct dc_cursor_mi_param *param, + uint32_t width, + uint32_t height) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + int x_pos = pos->x - param->viewport.x; + int y_pos = pos->y - param->viewport.y; + int x_hotspot = pos->x_hotspot; + int y_hotspot = pos->y_hotspot; + int src_x_offset = x_pos - pos->x_hotspot; + int src_y_offset = y_pos - pos->y_hotspot; + int cursor_height = (int)height; + int cursor_width = (int)width; + uint32_t cur_en = pos->enable ? 
1 : 0; + + // Transform cursor width / height and hotspots for offset calculations + if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) { + swap(cursor_height, cursor_width); + swap(x_hotspot, y_hotspot); + + if (param->rotation == ROTATION_ANGLE_90) { + // hotspot = (-y, x) + src_x_offset = x_pos - (cursor_width - x_hotspot); + src_y_offset = y_pos - y_hotspot; + } else if (param->rotation == ROTATION_ANGLE_270) { + // hotspot = (y, -x) + src_x_offset = x_pos - x_hotspot; + src_y_offset = y_pos - (cursor_height - y_hotspot); + } + } else if (param->rotation == ROTATION_ANGLE_180) { + // hotspot = (-x, -y) + if (!param->mirror) + src_x_offset = x_pos - (cursor_width - x_hotspot); + + src_y_offset = y_pos - (cursor_height - y_hotspot); + } + + if (src_x_offset >= (int)param->viewport.width) + cur_en = 0; /* not visible beyond right edge*/ + + if (src_x_offset + cursor_width <= 0) + cur_en = 0; /* not visible beyond left edge*/ + + if (src_y_offset >= (int)param->viewport.height) + cur_en = 0; /* not visible beyond bottom edge*/ + + if (src_y_offset + cursor_height <= 0) + cur_en = 0; /* not visible beyond top edge*/ + + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, cur_en); + + dpp_base->pos.cur0_ctl.bits.cur0_enable = cur_en; +} + +void dpp1_cnv_set_optional_cursor_attributes( + struct dpp *dpp_base, + struct dpp_cursor_attributes *attr) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (attr) { + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, attr->bias); + REG_UPDATE(CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, attr->scale); + } +} + +void dpp1_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (enable) { + if (dpp->tf_mask->DPPCLK_RATE_CONTROL) + REG_UPDATE_2(DPP_CONTROL, + DPPCLK_RATE_CONTROL, dppclk_div, + DPP_CLOCK_ENABLE, 1); + else + REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 1); + } else + REG_UPDATE(DPP_CONTROL, DPP_CLOCK_ENABLE, 0); +} + +static const struct dpp_funcs dcn10_dpp_funcs = { + .dpp_read_state = dpp_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, + .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, + .dpp_set_csc_default = dpp1_cm_set_output_csc_default, + .dpp_power_on_regamma_lut = dpp1_cm_power_on_regamma_lut, + .dpp_program_regamma_lut = dpp1_cm_program_regamma_lut, + .dpp_configure_regamma_lut = dpp1_cm_configure_regamma_lut, + .dpp_program_regamma_lutb_settings = dpp1_cm_program_regamma_lutb_settings, + .dpp_program_regamma_luta_settings = dpp1_cm_program_regamma_luta_settings, + .dpp_program_regamma_pwl = dpp1_cm_set_regamma_pwl, + .dpp_program_bias_and_scale = dpp1_program_bias_and_scale, + .dpp_set_degamma = dpp1_set_degamma, + .dpp_program_input_lut = dpp1_program_input_lut, + .dpp_program_degamma_pwl = dpp1_set_degamma_pwl, + .dpp_setup = dpp1_cnv_setup, + .dpp_full_bypass = dpp1_full_bypass, + .set_cursor_attributes = dpp1_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp1_set_hdr_multiplier, + .dpp_program_blnd_lut = NULL, + .dpp_program_shaper_lut = NULL, + .dpp_program_3dlut = NULL, + .dpp_get_gamut_remap = dpp1_cm_get_gamut_remap, +}; + +static struct dpp_caps dcn10_dpp_cap = { + 
.dscl_data_proc_format = DSCL_DATA_PRCESSING_FIXED_FORMAT, + .dscl_calc_lb_num_partitions = dpp1_dscl_calc_lb_num_partitions, +}; + +/*****************************************/ +/* Constructor, Destructor */ +/*****************************************/ + +void dpp1_construct( + struct dcn10_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn_dpp_registers *tf_regs, + const struct dcn_dpp_shift *tf_shift, + const struct dcn_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn10_dpp_funcs; + dpp->base.caps = &dcn10_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + dpp->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP | + LB_PIXEL_DEPTH_36BPP; + + dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; + dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h new file mode 100644 index 000000000000..a039eedc7c24 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -0,0 +1,1527 @@ +/* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DPP_DCN10_H__ +#define __DAL_DPP_DCN10_H__ + +#include "dpp.h" + +#define TO_DCN10_DPP(dpp)\ + container_of(dpp, struct dcn10_dpp, base) + +/* TODO: Use correct number of taps. 
Using polaris values for now */ +#define LB_TOTAL_NUMBER_OF_ENTRIES 5124 +#define LB_BITS_PER_ENTRY 144 + +#define TF_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +//Used to resolve corner case +#define TF2_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## _ ## field_name ## post_fix + +#define TF_REG_LIST_DCN(id) \ + SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ + SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ + SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ + SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ + SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ + SRI(DSCL_MEM_PWR_CTRL, DSCL, id), \ + SRI(OTG_H_BLANK, DSCL, id), \ + SRI(OTG_V_BLANK, DSCL, id), \ + SRI(SCL_MODE, DSCL, id), \ + SRI(LB_DATA_FORMAT, DSCL, id), \ + SRI(LB_MEMORY_CTRL, DSCL, id), \ + SRI(DSCL_AUTOCAL, DSCL, id), \ + SRI(DSCL_CONTROL, DSCL, id), \ + SRI(SCL_BLACK_OFFSET, DSCL, id), \ + SRI(SCL_TAP_CONTROL, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ + SRI(DSCL_2TAP_CONTROL, DSCL, id), \ + SRI(MPC_SIZE, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_BOT, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_BOT_C, DSCL, id), \ + SRI(RECOUT_START, DSCL, id), \ + SRI(RECOUT_SIZE, DSCL, id), \ + SRI(CM_ICSC_CONTROL, CM, id), \ + SRI(CM_ICSC_C11_C12, CM, id), \ + SRI(CM_ICSC_C33_C34, CM, id), \ + SRI(CM_DGAM_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMB_START_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL1_B, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL2_B, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL1_G, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL2_G, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL1_R, CM, id), \ + SRI(CM_DGAM_RAMB_END_CNTL2_R, CM, id), \ + SRI(CM_DGAM_RAMB_REGION_0_1, CM, id), \ + SRI(CM_DGAM_RAMB_REGION_14_15, CM, id), \ + SRI(CM_DGAM_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_DGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_DGAM_RAMA_SLOPE_CNTL_R, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL1_B, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL2_B, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL1_G, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL2_G, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL1_R, CM, id), \ + SRI(CM_DGAM_RAMA_END_CNTL2_R, CM, id), \ + SRI(CM_DGAM_RAMA_REGION_0_1, CM, id), \ + SRI(CM_DGAM_RAMA_REGION_14_15, CM, id), \ + SRI(CM_MEM_PWR_CTRL, CM, id), \ + SRI(CM_DGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_DGAM_LUT_INDEX, CM, id), \ + SRI(CM_DGAM_LUT_DATA, CM, id), \ + SRI(CM_CONTROL, CM, id), \ + SRI(CM_DGAM_CONTROL, CM, id), \ + SRI(CM_TEST_DEBUG_INDEX, CM, id), \ + SRI(CM_TEST_DEBUG_DATA, CM, id), \ + SRI(FORMAT_CONTROL, CNVC_CFG, id), \ + SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ + SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ + 
SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ + SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ + SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ + SRI(DPP_CONTROL, DPP_TOP, id), \ + SRI(CM_HDR_MULT_COEF, CM, id) + + + +#define TF_REG_LIST_DCN10(id) \ + TF_REG_LIST_DCN(id), \ + SRI(CM_COMA_C11_C12, CM, id),\ + SRI(CM_COMA_C33_C34, CM, id),\ + SRI(CM_COMB_C11_C12, CM, id),\ + SRI(CM_COMB_C33_C34, CM, id),\ + SRI(CM_OCSC_CONTROL, CM, id), \ + SRI(CM_OCSC_C11_C12, CM, id), \ + SRI(CM_OCSC_C33_C34, CM, id), \ + SRI(CM_BNS_VALUES_R, CM, id), \ + SRI(CM_BNS_VALUES_G, CM, id), \ + SRI(CM_BNS_VALUES_B, CM, id), \ + SRI(CM_MEM_PWR_CTRL, CM, id), \ + SRI(CM_RGAM_LUT_DATA, CM, id), \ + SRI(CM_RGAM_LUT_WRITE_EN_MASK, CM, id),\ + SRI(CM_RGAM_LUT_INDEX, CM, id), \ + SRI(CM_RGAM_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMB_START_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL1_B, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL2_B, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL1_G, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL2_G, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL1_R, CM, id), \ + SRI(CM_RGAM_RAMB_END_CNTL2_R, CM, id), \ + SRI(CM_RGAM_RAMB_REGION_0_1, CM, id), \ + SRI(CM_RGAM_RAMB_REGION_32_33, CM, id), \ + SRI(CM_RGAM_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_RGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_RGAM_RAMA_SLOPE_CNTL_R, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL1_B, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL2_B, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL1_G, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL2_G, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL1_R, CM, id), \ + SRI(CM_RGAM_RAMA_END_CNTL2_R, CM, id), \ + SRI(CM_RGAM_RAMA_REGION_0_1, CM, id), \ + SRI(CM_RGAM_RAMA_REGION_32_33, CM, id), \ + SRI(CM_RGAM_CONTROL, CM, id), \ + SRI(CM_IGAM_CONTROL, CM, id), \ + SRI(CM_IGAM_LUT_RW_CONTROL, CM, id), \ + SRI(CM_IGAM_LUT_RW_INDEX, CM, id), \ + SRI(CM_IGAM_LUT_SEQ_COLOR, CM, id), \ + SRI(CURSOR_CONTROL, CURSOR, id), \ + SRI(CM_CMOUT_CONTROL, CM, id) + + +#define TF_REG_LIST_SH_MASK_DCN(mask_sh)\ + TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ + 
TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ + TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ + TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ + TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_RGB_Y, mask_sh),\ + TF_SF(DSCL0_SCL_BLACK_OFFSET, SCL_BLACK_OFFSET_CBCR, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_FRAC_BOT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT, SCL_V_INIT_INT_BOT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_FRAC_BOT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_BOT_C, SCL_V_INIT_INT_BOT_C, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ + 
TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh), \ + TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh), \ + TF_SF(CM0_CM_ICSC_CONTROL, CM_ICSC_MODE, mask_sh), \ + TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C11, mask_sh), \ + TF_SF(CM0_CM_ICSC_C11_C12, CM_ICSC_C12, mask_sh), \ + TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C33, mask_sh), \ + TF_SF(CM0_CM_ICSC_C33_C34, CM_ICSC_C34, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_B, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_G, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_START_CNTL_R, CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_B, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_G, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_SLOPE_CNTL_R, CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_B, CM_DGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_B, CM_DGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_G, CM_DGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_G, CM_DGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL1_R, CM_DGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_END_CNTL2_R, CM_DGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_0_1, CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMB_REGION_14_15, CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_B, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_G, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_START_CNTL_R, CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_B, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_G, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_SLOPE_CNTL_R, CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_B, CM_DGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + 
TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_B, CM_DGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_G, CM_DGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_G, CM_DGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL1_R, CM_DGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_END_CNTL2_R, CM_DGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_0_1, CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_DGAM_RAMA_REGION_14_15, CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ + TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ + TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh) + +#define TF_REG_LIST_SH_MASK_DCN10(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN(mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_DEPTH, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_EXPAN_MODE, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, PIXEL_REDUCE_MODE, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, DYNAMIC_PIXEL_DEPTH, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, DITHER_EN, mask_sh),\ + TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C11, mask_sh),\ + TF_SF(CM0_CM_COMA_C11_C12, CM_COMA_C12, mask_sh),\ + TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C33, mask_sh),\ + TF_SF(CM0_CM_COMA_C33_C34, CM_COMA_C34, mask_sh),\ + TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C11, mask_sh),\ + TF_SF(CM0_CM_COMB_C11_C12, CM_COMB_C12, mask_sh),\ + TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C33, mask_sh),\ + TF_SF(CM0_CM_COMB_C33_C34, CM_COMB_C34, mask_sh),\ + TF_SF(CM0_CM_OCSC_CONTROL, CM_OCSC_MODE, mask_sh), \ + TF_SF(CM0_CM_OCSC_C11_C12, 
CM_OCSC_C11, mask_sh), \ + TF_SF(CM0_CM_OCSC_C11_C12, CM_OCSC_C12, mask_sh), \ + TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C33, mask_sh), \ + TF_SF(CM0_CM_OCSC_C33_C34, CM_OCSC_C34, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_BIAS_R, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_BIAS_G, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_BIAS_B, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_R, CM_BNS_SCALE_R, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_G, CM_BNS_SCALE_G, mask_sh), \ + TF_SF(CM0_CM_BNS_VALUES_B, CM_BNS_SCALE_B, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL, RGAM_MEM_PWR_FORCE, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_DATA, CM_RGAM_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_WRITE_EN_MASK, CM_RGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_RGAM_LUT_INDEX, CM_RGAM_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_B, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_G, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_START_CNTL_R, CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_B, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_G, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_SLOPE_CNTL_R, CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_B, CM_RGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_B, CM_RGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_G, CM_RGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_G, CM_RGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL1_R, CM_RGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_END_CNTL2_R, CM_RGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_0_1, CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMB_REGION_32_33, CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_B, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_G, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, 
CM_RGAM_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_START_CNTL_R, CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_B, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_G, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_SLOPE_CNTL_R, CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_B, CM_RGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_B, CM_RGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_G, CM_RGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_G, CM_RGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL1_R, CM_RGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_END_CNTL2_R, CM_RGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_0_1, CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_RGAM_RAMA_REGION_32_33, CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_RGAM_CONTROL, CM_RGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_R, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_G, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_LUT_FORMAT_B, mask_sh), \ + TF_SF(CM0_CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, mask_sh), \ + TF_SF(CM0_CM_CONTROL, CM_BYPASS_EN, mask_sh), \ + TF_SF(CM0_CM_IGAM_LUT_SEQ_COLOR, CM_IGAM_LUT_SEQ_COLOR, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, OUTPUT_FP, mask_sh), \ + TF_SF(CM0_CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh) + +/* + * + DCN1 CM debug status register definition + + register :ID9_CM_STATUS do + implement_ref :cm + map to: :cmdebugind, at: j + width 32 + disclosure NEVER + + field :ID9_VUPDATE_CFG, [0], R + field :ID9_IGAM_LUT_MODE, [2..1], R + field :ID9_BNS_BYPASS, [3], R + field :ID9_ICSC_MODE, [5..4], R + field :ID9_DGAM_LUT_MODE, 
[8..6], R + field :ID9_HDR_BYPASS, [9], R + field :ID9_GAMUT_REMAP_MODE, [11..10], R + field :ID9_RGAM_LUT_MODE, [14..12], R + #1 free bit + field :ID9_OCSC_MODE, [18..16], R + field :ID9_DENORM_MODE, [21..19], R + field :ID9_ROUND_TRUNC_MODE, [25..22], R + field :ID9_DITHER_EN, [26], R + field :ID9_DITHER_MODE, [28..27], R + end +*/ + +#define TF_DEBUG_REG_LIST_SH_DCN10 \ + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \ + .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16 + +#define TF_DEBUG_REG_LIST_MASK_DCN10 \ + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \ + .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000 + +#define TF_REG_FIELD_LIST(type) \ + type EXT_OVERSCAN_LEFT; \ + type EXT_OVERSCAN_RIGHT; \ + type EXT_OVERSCAN_BOTTOM; \ + type EXT_OVERSCAN_TOP; \ + type OTG_H_BLANK_START; \ + type OTG_H_BLANK_END; \ + type OTG_V_BLANK_START; \ + type OTG_V_BLANK_END; \ + type PIXEL_DEPTH; \ + type PIXEL_EXPAN_MODE; \ + type PIXEL_REDUCE_MODE; \ + type DYNAMIC_PIXEL_DEPTH; \ + type DITHER_EN; \ + type INTERLEAVE_EN; \ + type LB_DATA_FORMAT__ALPHA_EN; \ + type MEMORY_CONFIG; \ + type LB_MAX_PARTITIONS; \ + type AUTOCAL_MODE; \ + type AUTOCAL_NUM_PIPE; \ + type AUTOCAL_PIPE_ID; \ + type SCL_BOUNDARY_MODE; \ + type SCL_BLACK_OFFSET_RGB_Y; \ + type SCL_BLACK_OFFSET_CBCR; \ + type SCL_V_NUM_TAPS; \ + type SCL_H_NUM_TAPS; \ + type SCL_V_NUM_TAPS_C; \ + type SCL_H_NUM_TAPS_C; \ + type SCL_COEF_RAM_TAP_PAIR_IDX; \ + type SCL_COEF_RAM_PHASE; \ + type SCL_COEF_RAM_FILTER_TYPE; \ + type SCL_COEF_RAM_EVEN_TAP_COEF; \ + type SCL_COEF_RAM_EVEN_TAP_COEF_EN; \ + type SCL_COEF_RAM_ODD_TAP_COEF; \ + type SCL_COEF_RAM_ODD_TAP_COEF_EN; \ + type SCL_H_2TAP_HARDCODE_COEF_EN; \ + type SCL_H_2TAP_SHARP_EN; \ + type SCL_H_2TAP_SHARP_FACTOR; \ + type SCL_V_2TAP_HARDCODE_COEF_EN; \ + type SCL_V_2TAP_SHARP_EN; \ + type SCL_V_2TAP_SHARP_FACTOR; \ + type SCL_COEF_RAM_SELECT; \ + type DSCL_MODE; \ + type RECOUT_START_X; \ + type RECOUT_START_Y; \ + type RECOUT_WIDTH; \ + type RECOUT_HEIGHT; \ + type MPC_WIDTH; \ + type MPC_HEIGHT; \ + type SCL_H_SCALE_RATIO; \ + type SCL_V_SCALE_RATIO; \ + type SCL_H_SCALE_RATIO_C; \ + type SCL_V_SCALE_RATIO_C; \ + type SCL_H_INIT_FRAC; \ + type SCL_H_INIT_INT; \ + type SCL_H_INIT_FRAC_C; \ + type SCL_H_INIT_INT_C; \ + type SCL_V_INIT_FRAC; \ + type SCL_V_INIT_INT; \ + type SCL_V_INIT_FRAC_BOT; \ + type SCL_V_INIT_INT_BOT; \ + type SCL_V_INIT_FRAC_C; \ + type SCL_V_INIT_INT_C; \ + type SCL_V_INIT_FRAC_BOT_C; \ + type SCL_V_INIT_INT_BOT_C; \ + type SCL_CHROMA_COEF_MODE; \ + type SCL_COEF_RAM_SELECT_CURRENT; \ + type LUT_MEM_PWR_FORCE; \ + type LUT_MEM_PWR_STATE; \ + type CM_GAMUT_REMAP_MODE; \ + type CM_GAMUT_REMAP_C11; \ + type CM_GAMUT_REMAP_C12; \ + type CM_GAMUT_REMAP_C13; \ + type CM_GAMUT_REMAP_C14; \ + type CM_GAMUT_REMAP_C21; \ + type CM_GAMUT_REMAP_C22; \ + type CM_GAMUT_REMAP_C23; \ + type CM_GAMUT_REMAP_C24; \ + type CM_GAMUT_REMAP_C31; \ + type CM_GAMUT_REMAP_C32; \ + type CM_GAMUT_REMAP_C33; \ + type CM_GAMUT_REMAP_C34; \ + type CM_COMA_C11; \ + type CM_COMA_C12; \ + type CM_COMA_C33; \ + type CM_COMA_C34; \ + type CM_COMB_C11; \ + type CM_COMB_C12; \ + type CM_COMB_C33; \ + type CM_COMB_C34; \ + type CM_OCSC_MODE; \ + type CM_OCSC_C11; \ + type CM_OCSC_C12; \ + type CM_OCSC_C33; \ + type CM_OCSC_C34; \ + type RGAM_MEM_PWR_FORCE; \ + type CM_RGAM_LUT_DATA; \ + type CM_RGAM_LUT_WRITE_EN_MASK; \ + type CM_RGAM_LUT_WRITE_SEL; \ + type CM_RGAM_LUT_INDEX; \ + type CM_RGAM_RAMB_EXP_REGION_START_B; \ + type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_RGAM_RAMB_EXP_REGION_START_G; \ + type 
CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_RGAM_RAMB_EXP_REGION_START_R; \ + type CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_RGAM_RAMB_EXP_REGION_END_B; \ + type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; \ + type CM_RGAM_RAMB_EXP_REGION_END_BASE_B; \ + type CM_RGAM_RAMB_EXP_REGION_END_G; \ + type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_G; \ + type CM_RGAM_RAMB_EXP_REGION_END_BASE_G; \ + type CM_RGAM_RAMB_EXP_REGION_END_R; \ + type CM_RGAM_RAMB_EXP_REGION_END_SLOPE_R; \ + type CM_RGAM_RAMB_EXP_REGION_END_BASE_R; \ + type CM_RGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_RGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_RGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ + type CM_RGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ + type CM_RGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION_START_B; \ + type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_RGAM_RAMA_EXP_REGION_START_G; \ + type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_RGAM_RAMA_EXP_REGION_START_R; \ + type CM_RGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_RGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_RGAM_RAMA_EXP_REGION_END_B; \ + type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_RGAM_RAMA_EXP_REGION_END_BASE_B; \ + type CM_RGAM_RAMA_EXP_REGION_END_G; \ + type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_G; \ + type CM_RGAM_RAMA_EXP_REGION_END_BASE_G; \ + type CM_RGAM_RAMA_EXP_REGION_END_R; \ + type CM_RGAM_RAMA_EXP_REGION_END_SLOPE_R; \ + type CM_RGAM_RAMA_EXP_REGION_END_BASE_R; \ + type CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ + type CM_RGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ + type CM_RGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ + type CM_RGAM_LUT_MODE; \ + type CM_CMOUT_ROUND_TRUNC_MODE; \ + type CM_BLNDGAM_LUT_MODE; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R; \ + type CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS; \ + type 
CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET; \ + type CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ + type 
CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET; \ 
+ type CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS; \ + type CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET; \ + type CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS; \ + type CM_BLNDGAM_LUT_WRITE_EN_MASK; \ + type CM_BLNDGAM_LUT_WRITE_SEL; \ + type CM_BLNDGAM_CONFIG_STATUS; \ + type CM_BLNDGAM_LUT_INDEX; \ + type BLNDGAM_MEM_PWR_FORCE; \ + type CM_3DLUT_MODE; \ + type CM_3DLUT_SIZE; \ + type CM_3DLUT_INDEX; \ + type CM_3DLUT_DATA0; \ + type CM_3DLUT_DATA1; \ + type CM_3DLUT_DATA_30BIT; \ + type CM_3DLUT_WRITE_EN_MASK; \ + type CM_3DLUT_RAM_SEL; \ + type CM_3DLUT_30BIT_EN; \ + type CM_3DLUT_CONFIG_STATUS; \ + type CM_3DLUT_READ_SEL; \ + type CM_SHAPER_LUT_MODE; \ + type CM_SHAPER_RAMB_EXP_REGION_START_B; \ + type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_SHAPER_RAMB_EXP_REGION_START_G; \ + type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_SHAPER_RAMB_EXP_REGION_START_R; \ + type CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_SHAPER_RAMB_EXP_REGION_END_B; \ + type CM_SHAPER_RAMB_EXP_REGION_END_BASE_B; \ + type CM_SHAPER_RAMB_EXP_REGION_END_G; \ + type CM_SHAPER_RAMB_EXP_REGION_END_BASE_G; \ + type CM_SHAPER_RAMB_EXP_REGION_END_R; \ + type CM_SHAPER_RAMB_EXP_REGION_END_BASE_R; \ + type CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET; \ + type 
CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS; \ + type CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET; \ + type CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION_START_B; \ + type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_SHAPER_RAMA_EXP_REGION_START_G; \ + type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_SHAPER_RAMA_EXP_REGION_START_R; \ + type CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_SHAPER_RAMA_EXP_REGION_END_B; \ + type CM_SHAPER_RAMA_EXP_REGION_END_BASE_B; \ + type CM_SHAPER_RAMA_EXP_REGION_END_G; \ + type CM_SHAPER_RAMA_EXP_REGION_END_BASE_G; \ + type CM_SHAPER_RAMA_EXP_REGION_END_R; \ + type CM_SHAPER_RAMA_EXP_REGION_END_BASE_R; \ + type CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET; \ + type 
CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS; \ + type CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET; \ + type CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS; \ + type CM_SHAPER_LUT_WRITE_EN_MASK; \ + type CM_SHAPER_CONFIG_STATUS; \ + type CM_SHAPER_LUT_WRITE_SEL; \ + type CM_SHAPER_LUT_INDEX; \ + type CM_SHAPER_LUT_DATA; \ + type CM_DGAM_CONFIG_STATUS; \ + type CM_ICSC_MODE; \ + type CM_ICSC_C11; \ + type CM_ICSC_C12; \ + type CM_ICSC_C33; \ + type CM_ICSC_C34; \ + type CM_BNS_BIAS_R; \ + type CM_BNS_BIAS_G; \ + type CM_BNS_BIAS_B; \ + type CM_BNS_SCALE_R; \ + type CM_BNS_SCALE_G; \ + type CM_BNS_SCALE_B; \ + type CM_DGAM_RAMB_EXP_REGION_START_B; \ + type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; \ + type CM_DGAM_RAMB_EXP_REGION_START_G; \ + type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G; \ + type CM_DGAM_RAMB_EXP_REGION_START_R; \ + type CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R; \ + type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_DGAM_RAMB_EXP_REGION_END_B; \ + type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; \ + type CM_DGAM_RAMB_EXP_REGION_END_BASE_B; \ + type CM_DGAM_RAMB_EXP_REGION_END_G; \ + type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G; \ + type CM_DGAM_RAMB_EXP_REGION_END_BASE_G; \ + type CM_DGAM_RAMB_EXP_REGION_END_R; \ + type CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R; \ + type CM_DGAM_RAMB_EXP_REGION_END_BASE_R; \ + type CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS; \ + type CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS; \ + type CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS; \ + type CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET; \ + type CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION_START_B; \ + type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_DGAM_RAMA_EXP_REGION_START_G; \ + type CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G; \ + type CM_DGAM_RAMA_EXP_REGION_START_R; \ + type 
CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R; \ + type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; \ + type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G; \ + type CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R; \ + type CM_DGAM_RAMA_EXP_REGION_END_B; \ + type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_DGAM_RAMA_EXP_REGION_END_BASE_B; \ + type CM_DGAM_RAMA_EXP_REGION_END_G; \ + type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G; \ + type CM_DGAM_RAMA_EXP_REGION_END_BASE_G; \ + type CM_DGAM_RAMA_EXP_REGION_END_R; \ + type CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R; \ + type CM_DGAM_RAMA_EXP_REGION_END_BASE_R; \ + type CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS; \ + type CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET; \ + type CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS; \ + type SHARED_MEM_PWR_DIS; \ + type CM_IGAM_LUT_FORMAT_R; \ + type CM_IGAM_LUT_FORMAT_G; \ + type CM_IGAM_LUT_FORMAT_B; \ + type CM_IGAM_LUT_HOST_EN; \ + type CM_IGAM_LUT_RW_MODE; \ + type CM_IGAM_LUT_WRITE_EN_MASK; \ + type CM_IGAM_LUT_SEL; \ + type CM_IGAM_LUT_SEQ_COLOR; \ + type CM_IGAM_DGAM_CONFIG_STATUS; \ + type CM_DGAM_LUT_WRITE_EN_MASK; \ + type CM_DGAM_LUT_WRITE_SEL; \ + type CM_DGAM_LUT_INDEX; \ + type CM_DGAM_LUT_DATA; \ + type CM_DGAM_LUT_MODE; \ + type CM_IGAM_LUT_MODE; \ + type CM_IGAM_INPUT_FORMAT; \ + type CM_IGAM_LUT_RW_INDEX; \ + type CM_BYPASS_EN; \ + type FORMAT_EXPANSION_MODE; \ + type CNVC_BYPASS; \ + type OUTPUT_FP; \ + type CNVC_SURFACE_PIXEL_FORMAT; \ + type CURSOR_MODE; \ + type CURSOR_PITCH; \ + type CURSOR_LINES_PER_CHUNK; \ + type CURSOR_ENABLE; \ + type CUR0_MODE; \ + type CUR0_EXPANSION_MODE; \ + type CUR0_ENABLE; \ + type CM_BYPASS; \ + type CM_TEST_DEBUG_INDEX; \ + type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \ + type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\ + type FORMAT_CONTROL__ALPHA_EN; \ + type CUR0_COLOR0; \ + type CUR0_COLOR1; \ + type DPPCLK_RATE_CONTROL; \ + type DPP_CLOCK_ENABLE; \ + type CM_HDR_MULT_COEF; \ + type CUR0_FP_BIAS; \ + type CUR0_FP_SCALE; + +struct dcn_dpp_shift { + TF_REG_FIELD_LIST(uint8_t) +}; + +struct dcn_dpp_mask { + TF_REG_FIELD_LIST(uint32_t) +}; + +#define DPP_COMMON_REG_VARIABLE_LIST \ + uint32_t DSCL_EXT_OVERSCAN_LEFT_RIGHT; \ + uint32_t DSCL_EXT_OVERSCAN_TOP_BOTTOM; \ + uint32_t OTG_H_BLANK; \ + uint32_t OTG_V_BLANK; \ + uint32_t DSCL_MEM_PWR_CTRL; \ + uint32_t DSCL_MEM_PWR_STATUS; \ + uint32_t SCL_MODE; \ + uint32_t LB_DATA_FORMAT; \ + uint32_t LB_MEMORY_CTRL; \ + uint32_t DSCL_AUTOCAL; \ + uint32_t DSCL_CONTROL; \ + uint32_t SCL_BLACK_OFFSET; \ + uint32_t SCL_TAP_CONTROL; \ + uint32_t SCL_COEF_RAM_TAP_SELECT; \ + uint32_t SCL_COEF_RAM_TAP_DATA; \ + uint32_t DSCL_2TAP_CONTROL; \ + uint32_t MPC_SIZE; \ + uint32_t SCL_HORZ_FILTER_SCALE_RATIO; \ + uint32_t SCL_VERT_FILTER_SCALE_RATIO; \ + uint32_t SCL_HORZ_FILTER_SCALE_RATIO_C; \ + uint32_t SCL_VERT_FILTER_SCALE_RATIO_C; \ + uint32_t SCL_HORZ_FILTER_INIT; \ + uint32_t SCL_HORZ_FILTER_INIT_C; \ + uint32_t SCL_VERT_FILTER_INIT; \ + uint32_t SCL_VERT_FILTER_INIT_BOT; \ + uint32_t SCL_VERT_FILTER_INIT_C; \ + uint32_t SCL_VERT_FILTER_INIT_BOT_C; \ + uint32_t RECOUT_START; \ + uint32_t RECOUT_SIZE; \ + uint32_t CM_GAMUT_REMAP_CONTROL; \ + uint32_t CM_GAMUT_REMAP_C11_C12; \ + uint32_t CM_GAMUT_REMAP_C13_C14; \ + uint32_t CM_GAMUT_REMAP_C21_C22; \ + uint32_t CM_GAMUT_REMAP_C23_C24; \ + uint32_t CM_GAMUT_REMAP_C31_C32; \ + uint32_t 
CM_GAMUT_REMAP_C33_C34; \ + uint32_t CM_COMA_C11_C12; \ + uint32_t CM_COMA_C33_C34; \ + uint32_t CM_COMB_C11_C12; \ + uint32_t CM_COMB_C33_C34; \ + uint32_t CM_OCSC_CONTROL; \ + uint32_t CM_OCSC_C11_C12; \ + uint32_t CM_OCSC_C33_C34; \ + uint32_t CM_MEM_PWR_CTRL; \ + uint32_t CM_RGAM_LUT_DATA; \ + uint32_t CM_RGAM_LUT_WRITE_EN_MASK; \ + uint32_t CM_RGAM_LUT_INDEX; \ + uint32_t CM_RGAM_RAMB_START_CNTL_B; \ + uint32_t CM_RGAM_RAMB_START_CNTL_G; \ + uint32_t CM_RGAM_RAMB_START_CNTL_R; \ + uint32_t CM_RGAM_RAMB_SLOPE_CNTL_B; \ + uint32_t CM_RGAM_RAMB_SLOPE_CNTL_G; \ + uint32_t CM_RGAM_RAMB_SLOPE_CNTL_R; \ + uint32_t CM_RGAM_RAMB_END_CNTL1_B; \ + uint32_t CM_RGAM_RAMB_END_CNTL2_B; \ + uint32_t CM_RGAM_RAMB_END_CNTL1_G; \ + uint32_t CM_RGAM_RAMB_END_CNTL2_G; \ + uint32_t CM_RGAM_RAMB_END_CNTL1_R; \ + uint32_t CM_RGAM_RAMB_END_CNTL2_R; \ + uint32_t CM_RGAM_RAMB_REGION_0_1; \ + uint32_t CM_RGAM_RAMB_REGION_32_33; \ + uint32_t CM_RGAM_RAMA_START_CNTL_B; \ + uint32_t CM_RGAM_RAMA_START_CNTL_G; \ + uint32_t CM_RGAM_RAMA_START_CNTL_R; \ + uint32_t CM_RGAM_RAMA_SLOPE_CNTL_B; \ + uint32_t CM_RGAM_RAMA_SLOPE_CNTL_G; \ + uint32_t CM_RGAM_RAMA_SLOPE_CNTL_R; \ + uint32_t CM_RGAM_RAMA_END_CNTL1_B; \ + uint32_t CM_RGAM_RAMA_END_CNTL2_B; \ + uint32_t CM_RGAM_RAMA_END_CNTL1_G; \ + uint32_t CM_RGAM_RAMA_END_CNTL2_G; \ + uint32_t CM_RGAM_RAMA_END_CNTL1_R; \ + uint32_t CM_RGAM_RAMA_END_CNTL2_R; \ + uint32_t CM_RGAM_RAMA_REGION_0_1; \ + uint32_t CM_RGAM_RAMA_REGION_32_33; \ + uint32_t CM_RGAM_CONTROL; \ + uint32_t CM_CMOUT_CONTROL; \ + uint32_t CM_BLNDGAM_LUT_WRITE_EN_MASK; \ + uint32_t CM_BLNDGAM_CONTROL; \ + uint32_t CM_BLNDGAM_RAMB_START_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMB_START_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMB_START_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMB_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL1_B; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL2_B; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL1_G; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL2_G; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL1_R; \ + uint32_t CM_BLNDGAM_RAMB_END_CNTL2_R; \ + uint32_t CM_BLNDGAM_RAMB_REGION_0_1; \ + uint32_t CM_BLNDGAM_RAMB_REGION_2_3; \ + uint32_t CM_BLNDGAM_RAMB_REGION_4_5; \ + uint32_t CM_BLNDGAM_RAMB_REGION_6_7; \ + uint32_t CM_BLNDGAM_RAMB_REGION_8_9; \ + uint32_t CM_BLNDGAM_RAMB_REGION_10_11; \ + uint32_t CM_BLNDGAM_RAMB_REGION_12_13; \ + uint32_t CM_BLNDGAM_RAMB_REGION_14_15; \ + uint32_t CM_BLNDGAM_RAMB_REGION_16_17; \ + uint32_t CM_BLNDGAM_RAMB_REGION_18_19; \ + uint32_t CM_BLNDGAM_RAMB_REGION_20_21; \ + uint32_t CM_BLNDGAM_RAMB_REGION_22_23; \ + uint32_t CM_BLNDGAM_RAMB_REGION_24_25; \ + uint32_t CM_BLNDGAM_RAMB_REGION_26_27; \ + uint32_t CM_BLNDGAM_RAMB_REGION_28_29; \ + uint32_t CM_BLNDGAM_RAMB_REGION_30_31; \ + uint32_t CM_BLNDGAM_RAMB_REGION_32_33; \ + uint32_t CM_BLNDGAM_RAMA_START_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMA_START_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMA_START_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMA_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL1_B; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL2_B; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL1_G; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL2_G; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL1_R; \ + uint32_t CM_BLNDGAM_RAMA_END_CNTL2_R; \ + uint32_t CM_BLNDGAM_RAMA_REGION_0_1; \ + uint32_t CM_BLNDGAM_RAMA_REGION_2_3; \ + uint32_t CM_BLNDGAM_RAMA_REGION_4_5; \ + uint32_t CM_BLNDGAM_RAMA_REGION_6_7; \ + uint32_t CM_BLNDGAM_RAMA_REGION_8_9; 
\ + uint32_t CM_BLNDGAM_RAMA_REGION_10_11; \ + uint32_t CM_BLNDGAM_RAMA_REGION_12_13; \ + uint32_t CM_BLNDGAM_RAMA_REGION_14_15; \ + uint32_t CM_BLNDGAM_RAMA_REGION_16_17; \ + uint32_t CM_BLNDGAM_RAMA_REGION_18_19; \ + uint32_t CM_BLNDGAM_RAMA_REGION_20_21; \ + uint32_t CM_BLNDGAM_RAMA_REGION_22_23; \ + uint32_t CM_BLNDGAM_RAMA_REGION_24_25; \ + uint32_t CM_BLNDGAM_RAMA_REGION_26_27; \ + uint32_t CM_BLNDGAM_RAMA_REGION_28_29; \ + uint32_t CM_BLNDGAM_RAMA_REGION_30_31; \ + uint32_t CM_BLNDGAM_RAMA_REGION_32_33; \ + uint32_t CM_BLNDGAM_LUT_INDEX; \ + uint32_t CM_3DLUT_MODE; \ + uint32_t CM_3DLUT_INDEX; \ + uint32_t CM_3DLUT_DATA; \ + uint32_t CM_3DLUT_DATA_30BIT; \ + uint32_t CM_3DLUT_READ_WRITE_CONTROL; \ + uint32_t CM_SHAPER_LUT_WRITE_EN_MASK; \ + uint32_t CM_SHAPER_CONTROL; \ + uint32_t CM_SHAPER_RAMB_START_CNTL_B; \ + uint32_t CM_SHAPER_RAMB_START_CNTL_G; \ + uint32_t CM_SHAPER_RAMB_START_CNTL_R; \ + uint32_t CM_SHAPER_RAMB_END_CNTL_B; \ + uint32_t CM_SHAPER_RAMB_END_CNTL_G; \ + uint32_t CM_SHAPER_RAMB_END_CNTL_R; \ + uint32_t CM_SHAPER_RAMB_REGION_0_1; \ + uint32_t CM_SHAPER_RAMB_REGION_2_3; \ + uint32_t CM_SHAPER_RAMB_REGION_4_5; \ + uint32_t CM_SHAPER_RAMB_REGION_6_7; \ + uint32_t CM_SHAPER_RAMB_REGION_8_9; \ + uint32_t CM_SHAPER_RAMB_REGION_10_11; \ + uint32_t CM_SHAPER_RAMB_REGION_12_13; \ + uint32_t CM_SHAPER_RAMB_REGION_14_15; \ + uint32_t CM_SHAPER_RAMB_REGION_16_17; \ + uint32_t CM_SHAPER_RAMB_REGION_18_19; \ + uint32_t CM_SHAPER_RAMB_REGION_20_21; \ + uint32_t CM_SHAPER_RAMB_REGION_22_23; \ + uint32_t CM_SHAPER_RAMB_REGION_24_25; \ + uint32_t CM_SHAPER_RAMB_REGION_26_27; \ + uint32_t CM_SHAPER_RAMB_REGION_28_29; \ + uint32_t CM_SHAPER_RAMB_REGION_30_31; \ + uint32_t CM_SHAPER_RAMB_REGION_32_33; \ + uint32_t CM_SHAPER_RAMA_START_CNTL_B; \ + uint32_t CM_SHAPER_RAMA_START_CNTL_G; \ + uint32_t CM_SHAPER_RAMA_START_CNTL_R; \ + uint32_t CM_SHAPER_RAMA_END_CNTL_B; \ + uint32_t CM_SHAPER_RAMA_END_CNTL_G; \ + uint32_t CM_SHAPER_RAMA_END_CNTL_R; \ + uint32_t CM_SHAPER_RAMA_REGION_0_1; \ + uint32_t CM_SHAPER_RAMA_REGION_2_3; \ + uint32_t CM_SHAPER_RAMA_REGION_4_5; \ + uint32_t CM_SHAPER_RAMA_REGION_6_7; \ + uint32_t CM_SHAPER_RAMA_REGION_8_9; \ + uint32_t CM_SHAPER_RAMA_REGION_10_11; \ + uint32_t CM_SHAPER_RAMA_REGION_12_13; \ + uint32_t CM_SHAPER_RAMA_REGION_14_15; \ + uint32_t CM_SHAPER_RAMA_REGION_16_17; \ + uint32_t CM_SHAPER_RAMA_REGION_18_19; \ + uint32_t CM_SHAPER_RAMA_REGION_20_21; \ + uint32_t CM_SHAPER_RAMA_REGION_22_23; \ + uint32_t CM_SHAPER_RAMA_REGION_24_25; \ + uint32_t CM_SHAPER_RAMA_REGION_26_27; \ + uint32_t CM_SHAPER_RAMA_REGION_28_29; \ + uint32_t CM_SHAPER_RAMA_REGION_30_31; \ + uint32_t CM_SHAPER_RAMA_REGION_32_33; \ + uint32_t CM_SHAPER_LUT_INDEX; \ + uint32_t CM_SHAPER_LUT_DATA; \ + uint32_t CM_ICSC_CONTROL; \ + uint32_t CM_ICSC_C11_C12; \ + uint32_t CM_ICSC_C33_C34; \ + uint32_t CM_BNS_VALUES_R; \ + uint32_t CM_BNS_VALUES_G; \ + uint32_t CM_BNS_VALUES_B; \ + uint32_t CM_DGAM_RAMB_START_CNTL_B; \ + uint32_t CM_DGAM_RAMB_START_CNTL_G; \ + uint32_t CM_DGAM_RAMB_START_CNTL_R; \ + uint32_t CM_DGAM_RAMB_SLOPE_CNTL_B; \ + uint32_t CM_DGAM_RAMB_SLOPE_CNTL_G; \ + uint32_t CM_DGAM_RAMB_SLOPE_CNTL_R; \ + uint32_t CM_DGAM_RAMB_END_CNTL1_B; \ + uint32_t CM_DGAM_RAMB_END_CNTL2_B; \ + uint32_t CM_DGAM_RAMB_END_CNTL1_G; \ + uint32_t CM_DGAM_RAMB_END_CNTL2_G; \ + uint32_t CM_DGAM_RAMB_END_CNTL1_R; \ + uint32_t CM_DGAM_RAMB_END_CNTL2_R; \ + uint32_t CM_DGAM_RAMB_REGION_0_1; \ + uint32_t CM_DGAM_RAMB_REGION_14_15; \ + uint32_t CM_DGAM_RAMA_START_CNTL_B; \ + uint32_t 
CM_DGAM_RAMA_START_CNTL_G; \ + uint32_t CM_DGAM_RAMA_START_CNTL_R; \ + uint32_t CM_DGAM_RAMA_SLOPE_CNTL_B; \ + uint32_t CM_DGAM_RAMA_SLOPE_CNTL_G; \ + uint32_t CM_DGAM_RAMA_SLOPE_CNTL_R; \ + uint32_t CM_DGAM_RAMA_END_CNTL1_B; \ + uint32_t CM_DGAM_RAMA_END_CNTL2_B; \ + uint32_t CM_DGAM_RAMA_END_CNTL1_G; \ + uint32_t CM_DGAM_RAMA_END_CNTL2_G; \ + uint32_t CM_DGAM_RAMA_END_CNTL1_R; \ + uint32_t CM_DGAM_RAMA_END_CNTL2_R; \ + uint32_t CM_DGAM_RAMA_REGION_0_1; \ + uint32_t CM_DGAM_RAMA_REGION_14_15; \ + uint32_t CM_DGAM_LUT_WRITE_EN_MASK; \ + uint32_t CM_DGAM_LUT_INDEX; \ + uint32_t CM_DGAM_LUT_DATA; \ + uint32_t CM_CONTROL; \ + uint32_t CM_DGAM_CONTROL; \ + uint32_t CM_IGAM_CONTROL; \ + uint32_t CM_IGAM_LUT_RW_CONTROL; \ + uint32_t CM_IGAM_LUT_RW_INDEX; \ + uint32_t CM_IGAM_LUT_SEQ_COLOR; \ + uint32_t CM_TEST_DEBUG_INDEX; \ + uint32_t CM_TEST_DEBUG_DATA; \ + uint32_t FORMAT_CONTROL; \ + uint32_t CNVC_SURFACE_PIXEL_FORMAT; \ + uint32_t CURSOR_CONTROL; \ + uint32_t CURSOR0_CONTROL; \ + uint32_t CURSOR0_COLOR0; \ + uint32_t CURSOR0_COLOR1; \ + uint32_t DPP_CONTROL; \ + uint32_t CM_HDR_MULT_COEF; \ + uint32_t CURSOR0_FP_SCALE_BIAS; + +struct dcn_dpp_registers { + DPP_COMMON_REG_VARIABLE_LIST +}; + +struct dcn10_dpp { + struct dpp base; + + const struct dcn_dpp_registers *tf_regs; + const struct dcn_dpp_shift *tf_shift; + const struct dcn_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +enum dcn10_input_csc_select { + INPUT_CSC_SELECT_BYPASS = 0, + INPUT_CSC_SELECT_ICSC = 1, + INPUT_CSC_SELECT_COMA = 2 +}; + +void dpp1_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes); + +void dpp1_set_cursor_position( + struct dpp *dpp_base, + const struct dc_cursor_position *pos, + const struct dc_cursor_mi_param *param, + uint32_t width, + uint32_t height); + +void dpp1_cnv_set_optional_cursor_attributes( + struct dpp *dpp_base, + struct dpp_cursor_attributes *attr); + +bool dpp1_dscl_is_lb_conf_valid( + int ceil_vratio, + int num_partitions, + int vtaps); + +void dpp1_dscl_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c); + +void dpp1_degamma_ram_select( + struct dpp *dpp_base, + bool use_ram_a); + +void dpp1_program_degamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params); + +void dpp1_program_degamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params); + +void dpp1_program_degamma_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num, + bool is_ram_a); + +void dpp1_power_on_degamma_lut( + struct dpp *dpp_base, + bool power_on); + +void dpp1_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select select, + const struct out_csc_color_matrix *tbl_entry); + +void dpp1_program_bias_and_scale( + struct dpp *dpp_base, + struct dc_bias_and_scale *params); + +void dpp1_program_input_lut( + struct dpp *dpp_base, + const struct dc_gamma *gamma); + +void dpp1_full_bypass(struct dpp *dpp_base); + +void dpp1_set_degamma( + struct dpp *dpp_base, + enum ipp_degamma_mode mode); + +void dpp1_set_degamma_pwl(struct dpp *dpp_base, + const struct pwl_params *params); + + +void dpp_read_state(struct dpp *dpp_base, + 
struct dcn_dpp_state *s); + +void dpp_reset(struct dpp *dpp_base); + +void dpp1_cm_program_regamma_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num); + +void dpp1_cm_power_on_regamma_lut( + struct dpp *dpp_base, + bool power_on); + +void dpp1_cm_configure_regamma_lut( + struct dpp *dpp_base, + bool is_ram_a); + +/*program re gamma RAM A*/ +void dpp1_cm_program_regamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params); + +/*program re gamma RAM B*/ +void dpp1_cm_program_regamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params); +void dpp1_cm_set_output_csc_adjustment( + struct dpp *dpp_base, + const uint16_t *regval); + +void dpp1_cm_set_output_csc_default( + struct dpp *dpp_base, + enum dc_color_space colorspace); + +void dpp1_cm_set_gamut_remap( + struct dpp *dpp, + const struct dpp_grph_csc_adjustment *adjust); + +void dpp1_dscl_set_scaler_manual_scale( + struct dpp *dpp_base, + const struct scaler_data *scl_data); + +void dpp1_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut); + +void dpp1_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable); + +void dpp1_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier); + +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + +void dpp1_construct(struct dcn10_dpp *dpp1, + struct dc_context *ctx, + uint32_t inst, + const struct dcn_dpp_registers *tf_regs, + const struct dcn_dpp_shift *tf_shift, + const struct dcn_dpp_mask *tf_mask); + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c new file mode 100644 index 000000000000..20481b144609 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c @@ -0,0 +1,884 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn10/dcn10_dpp.h" +#include "basics/conversion.h" +#include "dcn10/dcn10_cm_common.h" + +#define NUM_PHASES 64 +#define HORZ_MAX_TAPS 8 +#define VERT_MAX_TAPS 8 + +#define BLACK_OFFSET_RGB_Y 0x0 +#define BLACK_OFFSET_CBCR 0x8000 + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) + + +enum dcn10_coef_filter_type_sel { + SCL_COEF_LUMA_VERT_FILTER = 0, + SCL_COEF_LUMA_HORZ_FILTER = 1, + SCL_COEF_CHROMA_VERT_FILTER = 2, + SCL_COEF_CHROMA_HORZ_FILTER = 3, + SCL_COEF_ALPHA_VERT_FILTER = 4, + SCL_COEF_ALPHA_HORZ_FILTER = 5 +}; + +enum dscl_autocal_mode { + AUTOCAL_MODE_OFF = 0, + + /* Autocal calculate the scaling ratio and initial phase and the + * DSCL_MODE_SEL must be set to 1 + */ + AUTOCAL_MODE_AUTOSCALE = 1, + /* Autocal perform auto centering without replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOCENTER = 2, + /* Autocal perform auto centering and auto replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOREPLICATE = 3 +}; + +enum dscl_mode_sel { + DSCL_MODE_SCALING_444_BYPASS = 0, + DSCL_MODE_SCALING_444_RGB_ENABLE = 1, + DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, + DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, + DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, + DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, + DSCL_MODE_DSCL_BYPASS = 6 +}; + +static void program_gamut_remap( + struct dcn10_dpp *dpp, + const uint16_t *regval, + enum gamut_remap_select select) +{ + uint16_t selection = 0; + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == GAMUT_REMAP_BYPASS) { + REG_SET(CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, 0); + return; + } + switch (select) { + case GAMUT_REMAP_COEFF: + selection = 1; + break; + case GAMUT_REMAP_COMA_COEFF: + selection = 2; + break; + case GAMUT_REMAP_COMB_COEFF: + selection = 3; + break; + default: + break; + } + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + + if (select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else { + + gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + } + + REG_SET( + CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, selection); + +} + +void dpp1_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + int i = 0; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + /* Bypass if type is bypass or hw */ + program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + 
arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF); + } +} + +static void read_gamut_remap(struct dcn10_dpp *dpp, + uint16_t *regval, + enum gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + REG_GET(CM_GAMUT_REMAP_CONTROL, + CM_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMB_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + + cm_helper_read_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp1_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + uint16_t arr_reg_val[12]; + enum gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + +static void dpp1_cm_program_color_matrix( + struct dcn10_dpp *dpp, + const uint16_t *regval) +{ + uint32_t ocsc_mode; + uint32_t cur_mode; + struct color_matrices_reg gam_regs; + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + /* determine which CSC matrix (ocsc or comb) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_SET(CM_TEST_DEBUG_INDEX, 0, + CM_TEST_DEBUG_INDEX, 9); + + REG_GET(CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode); + + if (cur_mode != 4) + ocsc_mode = 4; + else + ocsc_mode = 5; + + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12; + + if (ocsc_mode == 4) { + + gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34); + + } else { + + gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); + +} + +void dpp1_cm_set_output_csc_default( + struct dpp *dpp_base, + enum dc_color_space colorspace) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + const uint16_t *regval = NULL; + int arr_size; + + regval = find_color_matrix(colorspace, &arr_size); + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + + dpp1_cm_program_color_matrix(dpp, regval); +} + +static void dpp1_cm_get_reg_field( + struct dcn10_dpp *dpp, + struct xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B; +} + +static void dpp1_cm_get_degamma_reg_field( + struct dcn10_dpp *dpp, + struct xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B; +} +void dpp1_cm_set_output_csc_adjustment( + struct dpp *dpp_base, + const uint16_t *regval) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + dpp1_cm_program_color_matrix(dpp, regval); +} + +void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base, + bool power_on) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SET(CM_MEM_PWR_CTRL, 0, + RGAM_MEM_PWR_FORCE, power_on == true ? 0:1); + +} + +void dpp1_cm_program_regamma_lut(struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SEQ_START(); + + for (i = 0 ; i < num; i++) { + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg); + REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg); + + } + +} + +void dpp1_cm_configure_regamma_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, + CM_RGAM_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK, + CM_RGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); + REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0); +} + +/*program re gamma RAM A*/ +void dpp1_cm_program_regamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); + +} + +/*program re gamma RAM B*/ +void dpp1_cm_program_regamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +void dpp1_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn10_input_csc_select select; + struct color_matrices_reg gam_regs; + + if (input_select == INPUT_CSC_SELECT_BYPASS) { + REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC matrix (icsc or coma) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_SET(CM_TEST_DEBUG_INDEX, 0, + CM_TEST_DEBUG_INDEX, 9); + + REG_GET(CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select); + + if (cur_select != INPUT_CSC_SELECT_ICSC) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_COMA; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; + + if (select == INPUT_CSC_SELECT_ICSC) { + + gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); + + } else { + + gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_ICSC_CONTROL, 0, + CM_ICSC_MODE, select); +} + +//keep here for now, decide multi dce support later +void dpp1_program_bias_and_scale( + struct dpp *dpp_base, + struct dc_bias_and_scale *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SET_2(CM_BNS_VALUES_R, 0, + CM_BNS_SCALE_R, params->scale_red, + CM_BNS_BIAS_R, params->bias_red); + + REG_SET_2(CM_BNS_VALUES_G, 0, + CM_BNS_SCALE_G, params->scale_green, + CM_BNS_BIAS_G, params->bias_green); + + REG_SET_2(CM_BNS_VALUES_B, 0, + CM_BNS_SCALE_B, params->scale_blue, + CM_BNS_BIAS_B, params->bias_blue); + +} + +/*program de gamma RAM B*/ +void dpp1_program_degamma_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15); + + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +/*program de gamma RAM A*/ +void dpp1_program_degamma_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dpp1_cm_get_degamma_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G); + 
gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +void dpp1_power_on_degamma_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_SET(CM_MEM_PWR_CTRL, 0, + SHARED_MEM_PWR_DIS, power_on ? 0:1); + +} + +static void dpp1_enable_cm_block( + struct dpp *dpp_base) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8); + REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0); +} + +void dpp1_set_degamma( + struct dpp *dpp_base, + enum ipp_degamma_mode mode) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + dpp1_enable_cm_block(dpp_base); + + switch (mode) { + case IPP_DEGAMMA_MODE_BYPASS: + /* Setting de gamma bypass for now */ + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); + break; + case IPP_DEGAMMA_MODE_HW_sRGB: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); + break; + case IPP_DEGAMMA_MODE_HW_xvYCC: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); + break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + REG_SEQ_SUBMIT(); + REG_SEQ_WAIT_DONE(); +} + +void dpp1_degamma_ram_select( + struct dpp *dpp_base, + bool use_ram_a) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (use_ram_a) + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + else + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4); + +} + +static bool dpp1_degamma_ram_inuse( + struct dpp *dpp_base, + bool *ram_a_inuse) +{ + bool ret = false; + uint32_t status_reg = 0; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, + &status_reg); + + if (status_reg == 9) { + *ram_a_inuse = true; + ret = true; + } else if (status_reg == 10) { + *ram_a_inuse = false; + ret = true; + } + return ret; +} + +void dpp1_program_degamma_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num, + bool is_ram_a) +{ + uint32_t i; + + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0); + REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, + CM_DGAM_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, + is_ram_a == true ? 
0:1); + + REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); + for (i = 0 ; i < num; i++) { + REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_DGAM_LUT_DATA, 0, + CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, + CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, + CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); + } +} + +void dpp1_set_degamma_pwl(struct dpp *dpp_base, + const struct pwl_params *params) +{ + bool is_ram_a = true; + + dpp1_power_on_degamma_lut(dpp_base, true); + dpp1_enable_cm_block(dpp_base); + dpp1_degamma_ram_inuse(dpp_base, &is_ram_a); + if (is_ram_a == true) + dpp1_program_degamma_lutb_settings(dpp_base, params); + else + dpp1_program_degamma_luta_settings(dpp_base, params); + + dpp1_program_degamma_lut(dpp_base, params->rgb_resulted, + params->hw_points_num, !is_ram_a); + dpp1_degamma_ram_select(dpp_base, !is_ram_a); +} + +void dpp1_full_bypass(struct dpp *dpp_base) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + /* Input pixel format: ARGB8888 */ + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, 0x8); + + /* Zero expansion */ + REG_SET_3(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_CONTROL__ALPHA_EN, 0, + FORMAT_EXPANSION_MODE, 0); + + /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ + if (dpp->tf_mask->CM_BYPASS_EN) + REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); + else + REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); + + /* Setting degamma bypass for now */ + REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); +} + +static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base, + bool *ram_a_inuse) +{ + bool in_use = false; + uint32_t status_reg = 0; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS, + &status_reg); + + // 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB + if (status_reg == 1 || status_reg == 3 || status_reg == 4) { + *ram_a_inuse = true; + in_use = true; + // 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB + } else if (status_reg == 2 || status_reg == 5 || status_reg == 6) { + *ram_a_inuse = false; + in_use = true; + } + return in_use; +} + +/* + * Input gamma LUT currently supports 256 values only. This means input color + * can have a maximum of 8 bits per channel (= 256 possible values) in order to + * have a one-to-one mapping with the LUT. Truncation will occur with color + * values greater than 8 bits. + * + * In the future, this function should support additional input gamma methods, + * such as piecewise linear mapping, and input gamma bypass. + */ +void dpp1_program_input_lut( + struct dpp *dpp_base, + const struct dc_gamma *gamma) +{ + int i; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + bool rama_occupied = false; + uint32_t ram_num; + // Power on LUT memory. + REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1); + dpp1_enable_cm_block(dpp_base); + // Determine whether to use RAM A or RAM B + dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied); + if (!rama_occupied) + REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0); + else + REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1); + // RW mode is 256-entry LUT + REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0); + // IGAM Input format should be 8 bits per channel. 
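The one-to-one-mapping caveat in the comment above dpp1_program_input_lut can be made concrete with a small sketch. This is an editorial illustration only; the helper name and the 10-bit input width are assumptions, not part of this patch. Any source value wider than 8 bits per channel keeps only its top 8 bits as the 256-entry LUT index, so neighbouring codes collapse onto the same entry.

static inline unsigned int igam_lut_index(unsigned int code, unsigned int bit_depth)
{
	/* Keep the 8 most significant bits; anything below them is truncated. */
	return bit_depth > 8 ? code >> (bit_depth - 8) : code;
}
/* e.g. with bit_depth = 10, codes 0..3 all map to LUT entry 0 */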
+ REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0); + // Do not mask any R,G,B values + REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7); + // LUT-256, unsigned, integer, new u0.12 format + REG_UPDATE_3( + CM_IGAM_CONTROL, + CM_IGAM_LUT_FORMAT_R, 3, + CM_IGAM_LUT_FORMAT_G, 3, + CM_IGAM_LUT_FORMAT_B, 3); + // Start at index 0 of IGAM LUT + REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0); + for (i = 0; i < gamma->num_entries; i++) { + REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, + dc_fixpt_round( + gamma->entries.red[i])); + REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, + dc_fixpt_round( + gamma->entries.green[i])); + REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, + dc_fixpt_round( + gamma->entries.blue[i])); + } + // Power off LUT memory + REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0); + // Enable IGAM LUT on ram we just wrote to. 2 => RAMA, 3 => RAMB + REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2); + REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num); +} + +void dpp1_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c new file mode 100644 index 000000000000..808bca9fb804 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c @@ -0,0 +1,696 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn10/dcn10_dpp.h" +#include "basics/conversion.h" + + +#define NUM_PHASES 64 +#define HORZ_MAX_TAPS 8 +#define VERT_MAX_TAPS 8 + +#define BLACK_OFFSET_RGB_Y 0x0 +#define BLACK_OFFSET_CBCR 0x8000 + + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +enum dcn10_coef_filter_type_sel { + SCL_COEF_LUMA_VERT_FILTER = 0, + SCL_COEF_LUMA_HORZ_FILTER = 1, + SCL_COEF_CHROMA_VERT_FILTER = 2, + SCL_COEF_CHROMA_HORZ_FILTER = 3, + SCL_COEF_ALPHA_VERT_FILTER = 4, + SCL_COEF_ALPHA_HORZ_FILTER = 5 +}; + +enum dscl_autocal_mode { + AUTOCAL_MODE_OFF = 0, + + /* Autocal calculate the scaling ratio and initial phase and the + * DSCL_MODE_SEL must be set to 1 + */ + AUTOCAL_MODE_AUTOSCALE = 1, + /* Autocal perform auto centering without replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOCENTER = 2, + /* Autocal perform auto centering and auto replication and the + * DSCL_MODE_SEL must be set to 0 + */ + AUTOCAL_MODE_AUTOREPLICATE = 3 +}; + +enum dscl_mode_sel { + DSCL_MODE_SCALING_444_BYPASS = 0, + DSCL_MODE_SCALING_444_RGB_ENABLE = 1, + DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2, + DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3, + DSCL_MODE_SCALING_420_LUMA_BYPASS = 4, + DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5, + DSCL_MODE_DSCL_BYPASS = 6 +}; + +static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth) +{ + if (depth == LB_PIXEL_DEPTH_30BPP) + return 0; /* 10 bpc */ + else if (depth == LB_PIXEL_DEPTH_24BPP) + return 1; /* 8 bpc */ + else if (depth == LB_PIXEL_DEPTH_18BPP) + return 2; /* 6 bpc */ + else if (depth == LB_PIXEL_DEPTH_36BPP) + return 3; /* 12 bpc */ + else { + ASSERT(0); + return -1; /* Unsupported */ + } +} + +static bool dpp1_dscl_is_video_format(enum pixel_format format) +{ + if (format >= PIXEL_FORMAT_VIDEO_BEGIN + && format <= PIXEL_FORMAT_VIDEO_END) + return true; + else + return false; +} + +static bool dpp1_dscl_is_420_format(enum pixel_format format) +{ + if (format == PIXEL_FORMAT_420BPP8 || + format == PIXEL_FORMAT_420BPP10) + return true; + else + return false; +} + +static enum dscl_mode_sel dpp1_dscl_get_dscl_mode( + struct dpp *dpp_base, + const struct scaler_data *data, + bool dbg_always_scale) +{ + const long long one = dc_fixpt_one.value; + + if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { + /* DSCL is processing data in fixed format */ + if (data->format == PIXEL_FORMAT_FP16) + return DSCL_MODE_DSCL_BYPASS; + } + + if (data->ratios.horz.value == one + && data->ratios.vert.value == one + && data->ratios.horz_c.value == one + && data->ratios.vert_c.value == one + && !dbg_always_scale) + return DSCL_MODE_SCALING_444_BYPASS; + + if (!dpp1_dscl_is_420_format(data->format)) { + if (dpp1_dscl_is_video_format(data->format)) + return DSCL_MODE_SCALING_444_YCBCR_ENABLE; + else + return DSCL_MODE_SCALING_444_RGB_ENABLE; + } + if (data->ratios.horz.value == one && data->ratios.vert.value == one) + return DSCL_MODE_SCALING_420_LUMA_BYPASS; + if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one) + return DSCL_MODE_SCALING_420_CHROMA_BYPASS; + + return DSCL_MODE_SCALING_420_YCBCR_ENABLE; +} + +static void dpp1_power_on_dscl( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) { + if 
(power_on) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0); + REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5); + } else { + if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) { + dpp->base.ctx->dc->optimized_required = true; + dpp->base.deferred_reg_writes.bits.disable_dscl = true; + } else { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + } + } + } +} + + +static void dpp1_dscl_set_lb( + struct dcn10_dpp *dpp, + const struct line_buffer_params *lb_params, + enum lb_memory_config mem_size_config) +{ + uint32_t max_partitions = 63; /* Currently hardcoded on all ASICs before DCN 3.2 */ + + /* LB */ + if (dpp->base.caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { + /* DSCL caps: pixel data processed in fixed format */ + uint32_t pixel_depth = dpp1_dscl_get_pixel_depth_val(lb_params->depth); + uint32_t dyn_pix_depth = lb_params->dynamic_pixel_depth; + + REG_SET_7(LB_DATA_FORMAT, 0, + PIXEL_DEPTH, pixel_depth, /* Pixel depth stored in LB */ + PIXEL_EXPAN_MODE, lb_params->pixel_expan_mode, /* Pixel expansion mode */ + PIXEL_REDUCE_MODE, 1, /* Pixel reduction mode: Rounding */ + DYNAMIC_PIXEL_DEPTH, dyn_pix_depth, /* Dynamic expansion pixel depth */ + DITHER_EN, 0, /* Dithering enable: Disabled */ + INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ + LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ + } else { + /* DSCL caps: pixel data processed in float format */ + REG_SET_2(LB_DATA_FORMAT, 0, + INTERLEAVE_EN, lb_params->interleave_en, /* Interleave source enable */ + LB_DATA_FORMAT__ALPHA_EN, lb_params->alpha_en); /* Alpha enable */ + } + + if (dpp->base.caps->max_lb_partitions == 31) + max_partitions = 31; + + REG_SET_2(LB_MEMORY_CTRL, 0, + MEMORY_CONFIG, mem_size_config, + LB_MAX_PARTITIONS, max_partitions); +} + +static const uint16_t *dpp1_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio) +{ + if (taps == 8) + return get_filter_8tap_64p(ratio); + else if (taps == 7) + return get_filter_7tap_64p(ratio); + else if (taps == 6) + return get_filter_6tap_64p(ratio); + else if (taps == 5) + return get_filter_5tap_64p(ratio); + else if (taps == 4) + return get_filter_4tap_64p(ratio); + else if (taps == 3) + return get_filter_3tap_64p(ratio); + else if (taps == 2) + return get_filter_2tap_64p(); + else if (taps == 1) + return NULL; + else { + /* should never happen, bug */ + BREAK_TO_DEBUGGER(); + return NULL; + } +} + +static void dpp1_dscl_set_scaler_filter( + struct dcn10_dpp *dpp, + uint32_t taps, + enum dcn10_coef_filter_type_sel filter_type, + const uint16_t *filter) +{ + const int tap_pairs = (taps + 1) / 2; + int phase; + int pair; + uint16_t odd_coef, even_coef; + + REG_SET_3(SCL_COEF_RAM_TAP_SELECT, 0, + SCL_COEF_RAM_TAP_PAIR_IDX, 0, + SCL_COEF_RAM_PHASE, 0, + SCL_COEF_RAM_FILTER_TYPE, filter_type); + + for (phase = 0; phase < (NUM_PHASES / 2 + 1); phase++) { + for (pair = 0; pair < tap_pairs; pair++) { + even_coef = filter[phase * taps + 2 * pair]; + if ((pair * 2 + 1) < taps) + odd_coef = filter[phase * taps + 2 * pair + 1]; + else + odd_coef = 0; + + REG_SET_4(SCL_COEF_RAM_TAP_DATA, 0, + /* Even tap coefficient (bits 1:0 fixed to 0) */ + SCL_COEF_RAM_EVEN_TAP_COEF, even_coef, + /* Write/read control for even coefficient */ + SCL_COEF_RAM_EVEN_TAP_COEF_EN, 1, + /* Odd tap coefficient (bits 1:0 fixed to 0) */ + SCL_COEF_RAM_ODD_TAP_COEF, odd_coef, + /* Write/read control for odd coefficient */ + SCL_COEF_RAM_ODD_TAP_COEF_EN, 1); + } + } + +} + +static void dpp1_dscl_set_scl_filter( + 
struct dcn10_dpp *dpp, + const struct scaler_data *scl_data, + bool chroma_coef_mode) +{ + bool h_2tap_hardcode_coef_en = false; + bool v_2tap_hardcode_coef_en = false; + bool h_2tap_sharp_en = false; + bool v_2tap_sharp_en = false; + uint32_t h_2tap_sharp_factor = scl_data->sharpness.horz; + uint32_t v_2tap_sharp_factor = scl_data->sharpness.vert; + bool coef_ram_current; + const uint16_t *filter_h = NULL; + const uint16_t *filter_v = NULL; + const uint16_t *filter_h_c = NULL; + const uint16_t *filter_v_c = NULL; + + h_2tap_hardcode_coef_en = scl_data->taps.h_taps < 3 + && scl_data->taps.h_taps_c < 3 + && (scl_data->taps.h_taps > 1 && scl_data->taps.h_taps_c > 1); + v_2tap_hardcode_coef_en = scl_data->taps.v_taps < 3 + && scl_data->taps.v_taps_c < 3 + && (scl_data->taps.v_taps > 1 && scl_data->taps.v_taps_c > 1); + + h_2tap_sharp_en = h_2tap_hardcode_coef_en && h_2tap_sharp_factor != 0; + v_2tap_sharp_en = v_2tap_hardcode_coef_en && v_2tap_sharp_factor != 0; + + REG_UPDATE_6(DSCL_2TAP_CONTROL, + SCL_H_2TAP_HARDCODE_COEF_EN, h_2tap_hardcode_coef_en, + SCL_H_2TAP_SHARP_EN, h_2tap_sharp_en, + SCL_H_2TAP_SHARP_FACTOR, h_2tap_sharp_factor, + SCL_V_2TAP_HARDCODE_COEF_EN, v_2tap_hardcode_coef_en, + SCL_V_2TAP_SHARP_EN, v_2tap_sharp_en, + SCL_V_2TAP_SHARP_FACTOR, v_2tap_sharp_factor); + + if (!v_2tap_hardcode_coef_en || !h_2tap_hardcode_coef_en) { + bool filter_updated = false; + + filter_h = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.h_taps, scl_data->ratios.horz); + filter_v = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.v_taps, scl_data->ratios.vert); + + filter_updated = (filter_h && (filter_h != dpp->filter_h)) + || (filter_v && (filter_v != dpp->filter_v)); + + if (chroma_coef_mode) { + filter_h_c = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.h_taps_c, scl_data->ratios.horz_c); + filter_v_c = dpp1_dscl_get_filter_coeffs_64p( + scl_data->taps.v_taps_c, scl_data->ratios.vert_c); + filter_updated = filter_updated || (filter_h_c && (filter_h_c != dpp->filter_h_c)) + || (filter_v_c && (filter_v_c != dpp->filter_v_c)); + } + + if (filter_updated) { + uint32_t scl_mode = REG_READ(SCL_MODE); + + if (!h_2tap_hardcode_coef_en && filter_h) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.h_taps, + SCL_COEF_LUMA_HORZ_FILTER, filter_h); + } + dpp->filter_h = filter_h; + if (!v_2tap_hardcode_coef_en && filter_v) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.v_taps, + SCL_COEF_LUMA_VERT_FILTER, filter_v); + } + dpp->filter_v = filter_v; + if (chroma_coef_mode) { + if (!h_2tap_hardcode_coef_en && filter_h_c) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.h_taps_c, + SCL_COEF_CHROMA_HORZ_FILTER, filter_h_c); + } + if (!v_2tap_hardcode_coef_en && filter_v_c) { + dpp1_dscl_set_scaler_filter( + dpp, scl_data->taps.v_taps_c, + SCL_COEF_CHROMA_VERT_FILTER, filter_v_c); + } + } + dpp->filter_h_c = filter_h_c; + dpp->filter_v_c = filter_v_c; + + coef_ram_current = get_reg_field_value_ex( + scl_mode, dpp->tf_mask->SCL_COEF_RAM_SELECT_CURRENT, + dpp->tf_shift->SCL_COEF_RAM_SELECT_CURRENT); + + /* Swap coefficient RAM and set chroma coefficient mode */ + REG_SET_2(SCL_MODE, scl_mode, + SCL_COEF_RAM_SELECT, !coef_ram_current, + SCL_CHROMA_COEF_MODE, chroma_coef_mode); + } + } +} + +static int dpp1_dscl_get_lb_depth_bpc(enum lb_pixel_depth depth) +{ + if (depth == LB_PIXEL_DEPTH_30BPP) + return 10; + else if (depth == LB_PIXEL_DEPTH_24BPP) + return 8; + else if (depth == LB_PIXEL_DEPTH_18BPP) + return 6; + else if (depth == LB_PIXEL_DEPTH_36BPP) + return 12; + else { + 
BREAK_TO_DEBUGGER(); + return -1; /* Unsupported */ + } +} + +void dpp1_dscl_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c) +{ + int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a, + lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a; + + int line_size = scl_data->viewport.width < scl_data->recout.width ? + scl_data->viewport.width : scl_data->recout.width; + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? + scl_data->viewport_c.width : scl_data->recout.width; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + + lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth); + memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ + + if (lb_config == LB_MEMORY_CONFIG_1) { + lb_memory_size = 816; + lb_memory_size_c = 816; + lb_memory_size_a = 984; + } else if (lb_config == LB_MEMORY_CONFIG_2) { + lb_memory_size = 1088; + lb_memory_size_c = 1088; + lb_memory_size_a = 1312; + } else if (lb_config == LB_MEMORY_CONFIG_3) { + /* 420 mode: using 3rd mem from Y, Cr and Cb */ + lb_memory_size = 816 + 1088 + 848 + 848 + 848; + lb_memory_size_c = 816 + 1088; + lb_memory_size_a = 984 + 1312 + 456; + } else { + lb_memory_size = 816 + 1088 + 848; + lb_memory_size_c = 816 + 1088 + 848; + lb_memory_size_a = 984 + 1312 + 456; + } + *num_part_y = lb_memory_size / memory_line_size_y; + *num_part_c = lb_memory_size_c / memory_line_size_c; + num_partitions_a = lb_memory_size_a / memory_line_size_a; + + if (scl_data->lb_params.alpha_en + && (num_partitions_a < *num_part_y)) + *num_part_y = num_partitions_a; + + if (*num_part_y > 64) + *num_part_y = 64; + if (*num_part_c > 64) + *num_part_c = 64; + +} + +bool dpp1_dscl_is_lb_conf_valid(int ceil_vratio, int num_partitions, int vtaps) +{ + if (ceil_vratio > 2) + return vtaps <= (num_partitions - ceil_vratio + 2); + else + return vtaps <= num_partitions; +} + +/*find first match configuration which meets the min required lb size*/ +static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *dpp, + const struct scaler_data *scl_data) +{ + int num_part_y, num_part_c; + int vtaps = scl_data->taps.v_taps; + int vtaps_c = scl_data->taps.v_taps_c; + int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert); + int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c); + + if (dpp->base.ctx->dc->debug.use_max_lb) { + if (scl_data->format == PIXEL_FORMAT_420BPP8 + || scl_data->format == PIXEL_FORMAT_420BPP10) + return LB_MEMORY_CONFIG_3; + return LB_MEMORY_CONFIG_0; + } + + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_1, &num_part_y, &num_part_c); + + if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) + return LB_MEMORY_CONFIG_1; + + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_2, &num_part_y, &num_part_c); + + if (dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) + return LB_MEMORY_CONFIG_2; + + if (scl_data->format == PIXEL_FORMAT_420BPP8 + || scl_data->format == PIXEL_FORMAT_420BPP10) { + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_3, &num_part_y, &num_part_c); + + if 
(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)) + return LB_MEMORY_CONFIG_3; + } + + dpp->base.caps->dscl_calc_lb_num_partitions( + scl_data, LB_MEMORY_CONFIG_0, &num_part_y, &num_part_c); + + /*Ensure we can support the requested number of vtaps*/ + ASSERT(dpp1_dscl_is_lb_conf_valid(ceil_vratio, num_part_y, vtaps) + && dpp1_dscl_is_lb_conf_valid(ceil_vratio_c, num_part_c, vtaps_c)); + + return LB_MEMORY_CONFIG_0; +} + + +static void dpp1_dscl_set_manual_ratio_init( + struct dcn10_dpp *dpp, const struct scaler_data *data) +{ + uint32_t init_frac = 0; + uint32_t init_int = 0; + + REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, + SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5); + + REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, + SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5); + + REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0, + SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5); + + REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0, + SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5); + + /* + * 0.24 format for fraction, first five bits zeroed + */ + init_frac = dc_fixpt_u0d19(data->inits.h) << 5; + init_int = dc_fixpt_floor(data->inits.h); + REG_SET_2(SCL_HORZ_FILTER_INIT, 0, + SCL_H_INIT_FRAC, init_frac, + SCL_H_INIT_INT, init_int); + + init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5; + init_int = dc_fixpt_floor(data->inits.h_c); + REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0, + SCL_H_INIT_FRAC_C, init_frac, + SCL_H_INIT_INT_C, init_int); + + init_frac = dc_fixpt_u0d19(data->inits.v) << 5; + init_int = dc_fixpt_floor(data->inits.v); + REG_SET_2(SCL_VERT_FILTER_INIT, 0, + SCL_V_INIT_FRAC, init_frac, + SCL_V_INIT_INT, init_int); + + if (REG(SCL_VERT_FILTER_INIT_BOT)) { + struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert); + + init_frac = dc_fixpt_u0d19(bot) << 5; + init_int = dc_fixpt_floor(bot); + REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0, + SCL_V_INIT_FRAC_BOT, init_frac, + SCL_V_INIT_INT_BOT, init_int); + } + + init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5; + init_int = dc_fixpt_floor(data->inits.v_c); + REG_SET_2(SCL_VERT_FILTER_INIT_C, 0, + SCL_V_INIT_FRAC_C, init_frac, + SCL_V_INIT_INT_C, init_int); + + if (REG(SCL_VERT_FILTER_INIT_BOT_C)) { + struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); + + init_frac = dc_fixpt_u0d19(bot) << 5; + init_int = dc_fixpt_floor(bot); + REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0, + SCL_V_INIT_FRAC_BOT_C, init_frac, + SCL_V_INIT_INT_BOT_C, init_int); + } +} + +/** + * dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area + * + * @dpp: DPP data struct + * @recout: Rectangle information + * + * This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on + * the values specified in the recount parameter. + * + * Note: This function only have effect if AutoCal is disabled. 
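 *
 * Example (hypothetical numbers, editorial illustration): a plane occupying
 * the right half of a 1920x1080 OTG active area would use recout->x = 960,
 * recout->y = 0, recout->width = 960 and recout->height = 1080, i.e. the
 * rectangle is expressed in OTG active-area coordinates rather than plane
 * coordinates.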
+ */ +static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, + const struct rect *recout) +{ + REG_SET_2(RECOUT_START, 0, + /* First pixel of RECOUT in the active OTG area */ + RECOUT_START_X, recout->x, + /* First line of RECOUT in the active OTG area */ + RECOUT_START_Y, recout->y); + + REG_SET_2(RECOUT_SIZE, 0, + /* Number of RECOUT horizontal pixels */ + RECOUT_WIDTH, recout->width, + /* Number of RECOUT vertical lines */ + RECOUT_HEIGHT, recout->height); +} + +/** + * dpp1_dscl_set_scaler_manual_scale - Manually program scaler and line buffer + * + * @dpp_base: High level DPP struct + * @scl_data: scaler_data info + * + * This is the primary function to program scaler and line buffer in manual + * scaling mode. To execute the required operations for manual scale, we need + * to disable AutoCal first. + */ +void dpp1_dscl_set_scaler_manual_scale(struct dpp *dpp_base, + const struct scaler_data *scl_data) +{ + enum lb_memory_config lb_config; + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode( + dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); + bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN + && scl_data->format <= PIXEL_FORMAT_VIDEO_END; + + if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0) + return; + + PERF_TRACE(); + + dpp->scl_data = *scl_data; + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) { + if (dscl_mode != DSCL_MODE_DSCL_BYPASS) + dpp1_power_on_dscl(dpp_base, true); + } + + /* Autocal off */ + REG_SET_3(DSCL_AUTOCAL, 0, + AUTOCAL_MODE, AUTOCAL_MODE_OFF, + AUTOCAL_NUM_PIPE, 0, + AUTOCAL_PIPE_ID, 0); + + /*clean scaler boundary mode when Autocal off*/ + REG_SET(DSCL_CONTROL, 0, + SCL_BOUNDARY_MODE, 0); + + /* Recout */ + dpp1_dscl_set_recout(dpp, &scl_data->recout); + + /* MPC Size */ + REG_SET_2(MPC_SIZE, 0, + /* Number of horizontal pixels of MPC */ + MPC_WIDTH, scl_data->h_active, + /* Number of vertical lines of MPC */ + MPC_HEIGHT, scl_data->v_active); + + /* SCL mode */ + REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); + + if (dscl_mode == DSCL_MODE_DSCL_BYPASS) { + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.dscl) + dpp1_power_on_dscl(dpp_base, false); + return; + } + + /* LB */ + lb_config = dpp1_dscl_find_lb_memory_config(dpp, scl_data); + dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); + + if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) + return; + + /* Black offsets */ + if (REG(SCL_BLACK_OFFSET)) { + if (ycbcr) + REG_SET_2(SCL_BLACK_OFFSET, 0, + SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, + SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); + else + + REG_SET_2(SCL_BLACK_OFFSET, 0, + SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, + SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); + } + + /* Manually calculate scale ratio and init values */ + dpp1_dscl_set_manual_ratio_init(dpp, scl_data); + + /* HTaps/VTaps */ + REG_SET_4(SCL_TAP_CONTROL, 0, + SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1, + SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1, + SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1, + SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1); + + dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr); + PERF_TRACE(); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt new file mode 100644 index 000000000000..9c2d7096348e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/CMakeLists.txt @@ -0,0 +1,5 @@ +dal3_subdirectory_sources( + dcn20_dpp.c + dcn20_dpp_cm.c + dcn20_dpp.h +) diff --git
a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c new file mode 100644 index 000000000000..56ebd7164dd7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c @@ -0,0 +1,435 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn20/dcn20_dpp.h" +#include "basics/conversion.h" + +#define NUM_PHASES 64 +#define HORZ_MAX_TAPS 8 +#define VERT_MAX_TAPS 8 + +#define BLACK_OFFSET_RGB_Y 0x0 +#define BLACK_OFFSET_CBCR 0x8000 + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +void dpp20_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET(DPP_CONTROL, + DPP_CLOCK_ENABLE, &s->is_enabled); + + // Degamma LUT (RAM) + REG_GET(CM_DGAM_CONTROL, + CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_CONFIG_STATUS, &s->lut3d_mode, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_CONFIG_STATUS, &s->rgam_lut_mode); +} + +void dpp2_power_on_obuf( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_MEM_PWR_CTRL, SHARED_MEM_PWR_DIS, power_on == true ? 1:0); + + REG_UPDATE(OBUF_MEM_PWR_CTRL, + OBUF_MEM_PWR_FORCE, power_on == true ? 0:1); + + REG_UPDATE(DSCL_MEM_PWR_CTRL, + LUT_MEM_PWR_FORCE, power_on == true ? 
0:1); +} + +void dpp2_dummy_program_input_lut( + struct dpp *dpp_base, + const struct dc_gamma *gamma) +{} + +static void dpp2_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + uint32_t pixel_format = 0; + uint32_t alpha_en = 1; + enum dc_color_space color_space = COLOR_SPACE_SRGB; + enum dcn20_input_csc_select select = DCN2_ICSC_SELECT_BYPASS; + bool force_disable_cursor = false; + struct out_csc_color_matrix tbl_entry; + uint32_t is_2bit = 0; + int i = 0; + + REG_SET_2(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode); + + //hardcode default + //FORMAT_CONTROL. FORMAT_CNV16 default 0: U0.16/S.1.15; 1: U1.15/ S.1.14 + //FORMAT_CONTROL. CNVC_BYPASS_MSB_ALIGN default 0: disabled 1: enabled + //FORMAT_CONTROL. CLAMP_POSITIVE default 0: disabled 1: enabled + //FORMAT_CONTROL. CLAMP_POSITIVE_C default 0: disabled 1: enabled + REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); + REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + pixel_format = 10; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + pixel_format = 26; /* ARGB16161616_UNORM */ + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + pixel_format = 12; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + pixel_format = 112; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + pixel_format = 113; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + pixel_format = 114; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: + pixel_format = 115; + color_space = COLOR_SPACE_YCBCR709; + select = DCN2_ICSC_SELECT_ICSC_A; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + pixel_format = 118; + alpha_en = 0; + break; + case 
SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + pixel_format = 119; + alpha_en = 0; + break; + default: + break; + } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + + if (is_2bit == 1 && alpha_2bit_lut != NULL) { + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); + } + + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + // if input adjustments exist, program icsc with those values + if (input_csc_color_matrix.enable_adjustment + == true) { + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; + + tbl_entry.color_space = input_color_space; + + if (color_space >= COLOR_SPACE_YCBCR601) + select = DCN2_ICSC_SELECT_ICSC_A; + else + select = DCN2_ICSC_SELECT_BYPASS; + + dpp2_program_input_csc(dpp_base, color_space, select, &tbl_entry); + } else + dpp2_program_input_csc(dpp_base, color_space, select, NULL); + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + + } + dpp2_power_on_obuf(dpp_base, true); + +} + +/*compute the maximum number of lines that we can fit in the line buffer*/ +void dscl2_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c) +{ + int memory_line_size_y, memory_line_size_c, memory_line_size_a, + lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; + + int line_size = scl_data->viewport.width < scl_data->recout.width ? + scl_data->viewport.width : scl_data->recout.width; + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? 
+ scl_data->viewport_c.width : scl_data->recout.width; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ + memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ + + if (lb_config == LB_MEMORY_CONFIG_1) { + lb_memory_size = 970; + lb_memory_size_c = 970; + lb_memory_size_a = 970; + } else if (lb_config == LB_MEMORY_CONFIG_2) { + lb_memory_size = 1290; + lb_memory_size_c = 1290; + lb_memory_size_a = 1290; + } else if (lb_config == LB_MEMORY_CONFIG_3) { + /* 420 mode: using 3rd mem from Y, Cr and Cb */ + lb_memory_size = 970 + 1290 + 484 + 484 + 484; + lb_memory_size_c = 970 + 1290; + lb_memory_size_a = 970 + 1290 + 484; + } else { + lb_memory_size = 970 + 1290 + 484; + lb_memory_size_c = 970 + 1290 + 484; + lb_memory_size_a = 970 + 1290 + 484; + } + *num_part_y = lb_memory_size / memory_line_size_y; + *num_part_c = lb_memory_size_c / memory_line_size_c; + num_partitions_a = lb_memory_size_a / memory_line_size_a; + + if (scl_data->lb_params.alpha_en + && (num_partitions_a < *num_part_y)) + *num_part_y = num_partitions_a; + + if (*num_part_y > 64) + *num_part_y = 64; + if (*num_part_c > 64) + *num_part_c = 64; +} + +void dpp2_cnv_set_alpha_keyer( + struct dpp *dpp_base, + struct cnv_color_keyer_params *color_keyer) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_EN, color_keyer->color_keyer_en); + + REG_UPDATE(COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, color_keyer->color_keyer_mode); + + REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, color_keyer->color_keyer_alpha_low); + REG_UPDATE(COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, color_keyer->color_keyer_alpha_high); + + REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, color_keyer->color_keyer_red_low); + REG_UPDATE(COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, color_keyer->color_keyer_red_high); + + REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, color_keyer->color_keyer_green_low); + REG_UPDATE(COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, color_keyer->color_keyer_green_high); + + REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, color_keyer->color_keyer_blue_low); + REG_UPDATE(COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, color_keyer->color_keyer_blue_high); +} + +void dpp2_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes) +{ + enum dc_cursor_color_format color_format = cursor_attributes->color_format; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int cur_rom_en = 0; + + if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || + color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } + } + + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); + + if (color_format == CURSOR_MODE_MONO) { + /* todo: clarify what to program these to */ + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } +} + +void oppn20_dummy_program_regamma_pwl( + struct dpp *dpp, + const struct pwl_params *params, + enum opp_regamma mode) +{} + +static struct dpp_funcs dcn20_dpp_funcs = { + .dpp_read_state = dpp20_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, + .dpp_set_gamut_remap 
= dpp2_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, + .dpp_set_degamma = dpp2_set_degamma, + .dpp_program_input_lut = dpp2_dummy_program_input_lut, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp2_cnv_setup, + .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, + .dpp_program_blnd_lut = dpp20_program_blnd_lut, + .dpp_program_shaper_lut = dpp20_program_shaper, + .dpp_program_3dlut = dpp20_program_3dlut, + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp2_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, +}; + +static struct dpp_caps dcn20_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, +}; + +bool dpp2_construct( + struct dcn20_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn2_dpp_registers *tf_regs, + const struct dcn2_dpp_shift *tf_shift, + const struct dcn2_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn20_dpp_funcs; + dpp->base.caps = &dcn20_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + dpp->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP | + LB_PIXEL_DEPTH_36BPP; + + dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; + dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/ + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h new file mode 100644 index 000000000000..672cde46c4b9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h @@ -0,0 +1,781 @@ +/* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN20_DPP_H__ +#define __DCN20_DPP_H__ + +#include "dcn10/dcn10_dpp.h" + +#define TO_DCN20_DPP(dpp)\ + container_of(dpp, struct dcn20_dpp, base) + +#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ + SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) + +#define TF_REG_LIST_DCN20_COMMON(id) \ + SRI(CM_BLNDGAM_CONTROL, CM, id), \ + SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL2_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL1_R, CM, id), \ + SRI(CM_BLNDGAM_RAMB_END_CNTL2_R, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_0_1, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_2_3, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_4_5, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_6_7, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_8_9, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_10_11, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_12_13, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_14_15, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_16_17, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_18_19, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_20_21, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_22_23, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_24_25, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_26_27, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_28_29, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_30_31, CM, id), \ + SRI(CM_BLNDGAM_RAMB_REGION_32_33, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL2_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL1_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_END_CNTL2_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_0_1, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_2_3, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_4_5, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_6_7, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_8_9, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_10_11, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_12_13, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_14_15, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_16_17, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_18_19, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_20_21, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_22_23, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_24_25, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_26_27, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_28_29, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_30_31, CM, id), \ + SRI(CM_BLNDGAM_RAMA_REGION_32_33, CM, id), \ + SRI(CM_BLNDGAM_LUT_INDEX, CM, id), \ + SRI(CM_BLNDGAM_LUT_DATA, CM, id), \ + SRI(CM_3DLUT_MODE, CM, id), \ + SRI(CM_3DLUT_INDEX, CM, id), \ + SRI(CM_3DLUT_DATA, CM, id), \ + SRI(CM_3DLUT_DATA_30BIT, CM, id), \ + SRI(CM_3DLUT_READ_WRITE_CONTROL, CM, id), \ + SRI(CM_SHAPER_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_SHAPER_CONTROL, CM, id), \ + SRI(CM_SHAPER_RAMB_START_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMB_START_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMB_START_CNTL_R, CM, id), \ + 
SRI(CM_SHAPER_RAMB_END_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMB_END_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMB_END_CNTL_R, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_0_1, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_2_3, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_4_5, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_6_7, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_8_9, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_10_11, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_12_13, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_14_15, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_16_17, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_18_19, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_20_21, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_22_23, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_24_25, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_26_27, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_28_29, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_30_31, CM, id), \ + SRI(CM_SHAPER_RAMB_REGION_32_33, CM, id), \ + SRI(CM_SHAPER_RAMA_START_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMA_START_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMA_START_CNTL_R, CM, id), \ + SRI(CM_SHAPER_RAMA_END_CNTL_B, CM, id), \ + SRI(CM_SHAPER_RAMA_END_CNTL_G, CM, id), \ + SRI(CM_SHAPER_RAMA_END_CNTL_R, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_0_1, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_2_3, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_4_5, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_6_7, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_8_9, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_10_11, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_12_13, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_14_15, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_16_17, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_18_19, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_20_21, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_22_23, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_24_25, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_26_27, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ + SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ + SRI(CM_SHAPER_LUT_INDEX, CM, id) + +#define TF_REG_LIST_DCN20_COMMON_APPEND(id) \ + SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ + SRI(CM_ICSC_B_C11_C12, CM, id), \ + SRI(CM_ICSC_B_C33_C34, CM, id) + +#define TF_REG_LIST_DCN20(id) \ + TF_REG_LIST_DCN(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id), \ + SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ + SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ + SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ + SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ + SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ + SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ + SRI(CM_SHAPER_LUT_DATA, CM, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id),\ + SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ + SRI(DSCL_MEM_PWR_CTRL, DSCL, id) + + +#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, 
CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_2_3, CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_4_5, CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, 
CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_6_7, CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_8_9, CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_10_11, CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_12_13, CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_14_15, CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_16_17, CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_18_19, CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_20_21, CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_22_23, CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, 
CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_24_25, CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_26_27, CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_28_29, CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_30_31, CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_32_33, CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_2_3, CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), 
\ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_4_5, CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_6_7, CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_8_9, CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_10_11, CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_12_13, CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_14_15, CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_16_17, CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_18_19, CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_20_21, CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ + 
TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_22_23, CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_24_25, CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_26_27, CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_28_29, CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_30_31, CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_SIZE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_INDEX, CM_3DLUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA0, mask_sh), \ + TF_SF(CM0_CM_3DLUT_DATA, CM_3DLUT_DATA1, mask_sh), \ + TF_SF(CM0_CM_3DLUT_DATA_30BIT, CM_3DLUT_DATA_30BIT, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ + TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_B, mask_sh), \ + 
TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_G, CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_END_CNTL_R, CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_0_1, CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_2_3, CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_4_5, CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_6_7, CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_8_9, CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_10_11, CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_12_13, CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_14_15, CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, 
CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_16_17, CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_18_19, CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_20_21, CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_22_23, CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_24_25, CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_26_27, CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_28_29, CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_30_31, CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMB_REGION_32_33, CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_G, mask_sh), \ + 
TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_START_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_B, CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_G, CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_END_CNTL_R, CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_0_1, CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_2_3, CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_4_5, CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_6_7, CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_8_9, CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_10_11, CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_12_13, CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, mask_sh), 
\ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_14_15, CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_16_17, CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_18_19, CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_20_21, CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_22_23, CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_24_25, CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_26_27, CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_28_29, CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_30_31, CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ + TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, 
CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ + TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ + TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ + TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) + +/* DPP CM debug status register: + * + * Status index including current ICSC, Gamut Remap Mode is 9 + * ICSC Mode: [4..3] + * Gamut Remap Mode: [10..9] + */ +#define CM_TEST_DEBUG_DATA_STATUS_IDX 9 + +#define TF_DEBUG_REG_LIST_SH_DCN20 \ + TF_DEBUG_REG_LIST_SH_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 3, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 9 + +#define TF_DEBUG_REG_LIST_MASK_DCN20 \ + TF_DEBUG_REG_LIST_MASK_DCN10, \ + .CM_TEST_DEBUG_DATA_ICSC_MODE = 0x18, \ + .CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE = 0x600 + +#define TF_REG_FIELD_LIST_DCN2_0(type) \ + TF_REG_FIELD_LIST(type) \ + type CM_BLNDGAM_LUT_DATA; \ + type CM_TEST_DEBUG_DATA_ICSC_MODE; \ + type CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE; \ + type FORMAT_CNV16; \ + type 
CNVC_BYPASS_MSB_ALIGN; \ + type CLAMP_POSITIVE; \ + type CLAMP_POSITIVE_C; \ + type ALPHA_2BIT_LUT0; \ + type ALPHA_2BIT_LUT1; \ + type ALPHA_2BIT_LUT2; \ + type ALPHA_2BIT_LUT3; \ + type FCNV_FP_BIAS_R; \ + type FCNV_FP_BIAS_G; \ + type FCNV_FP_BIAS_B; \ + type FCNV_FP_SCALE_R; \ + type FCNV_FP_SCALE_G; \ + type FCNV_FP_SCALE_B; \ + type COLOR_KEYER_EN; \ + type COLOR_KEYER_MODE; \ + type COLOR_KEYER_ALPHA_LOW; \ + type COLOR_KEYER_ALPHA_HIGH; \ + type COLOR_KEYER_RED_LOW; \ + type COLOR_KEYER_RED_HIGH; \ + type COLOR_KEYER_GREEN_LOW; \ + type COLOR_KEYER_GREEN_HIGH; \ + type COLOR_KEYER_BLUE_LOW; \ + type COLOR_KEYER_BLUE_HIGH; \ + type CUR0_PIX_INV_MODE; \ + type CUR0_PIXEL_ALPHA_MOD_EN; \ + type CUR0_ROM_EN;\ + type OBUF_MEM_PWR_FORCE + + +struct dcn2_dpp_shift { + TF_REG_FIELD_LIST_DCN2_0(uint8_t); +}; + +struct dcn2_dpp_mask { + TF_REG_FIELD_LIST_DCN2_0(uint32_t); +}; + +#define DPP_DCN2_REG_VARIABLE_LIST \ + DPP_COMMON_REG_VARIABLE_LIST \ + uint32_t CM_BLNDGAM_LUT_DATA; \ + uint32_t ALPHA_2BIT_LUT; \ + uint32_t FCNV_FP_BIAS_R; \ + uint32_t FCNV_FP_BIAS_G; \ + uint32_t FCNV_FP_BIAS_B; \ + uint32_t FCNV_FP_SCALE_R; \ + uint32_t FCNV_FP_SCALE_G; \ + uint32_t FCNV_FP_SCALE_B; \ + uint32_t COLOR_KEYER_CONTROL; \ + uint32_t COLOR_KEYER_ALPHA; \ + uint32_t COLOR_KEYER_RED; \ + uint32_t COLOR_KEYER_GREEN; \ + uint32_t COLOR_KEYER_BLUE; \ + uint32_t OBUF_MEM_PWR_CTRL + +#define DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND \ + uint32_t CM_GAMUT_REMAP_B_C11_C12; \ + uint32_t CM_GAMUT_REMAP_B_C13_C14; \ + uint32_t CM_GAMUT_REMAP_B_C21_C22; \ + uint32_t CM_GAMUT_REMAP_B_C23_C24; \ + uint32_t CM_GAMUT_REMAP_B_C31_C32; \ + uint32_t CM_GAMUT_REMAP_B_C33_C34; \ + uint32_t CM_ICSC_B_C11_C12; \ + uint32_t CM_ICSC_B_C33_C34 + +struct dcn2_dpp_registers { + DPP_DCN2_REG_VARIABLE_LIST; + DPP_DCN2_REG_VARIABLE_LIST_CM_APPEND; +}; + +struct dcn20_dpp { + struct dpp base; + + const struct dcn2_dpp_registers *tf_regs; + const struct dcn2_dpp_shift *tf_shift; + const struct dcn2_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +enum dcn20_input_csc_select { + DCN2_ICSC_SELECT_BYPASS = 0, + DCN2_ICSC_SELECT_ICSC_A = 1, + DCN2_ICSC_SELECT_ICSC_B = 2 +}; + +enum dcn20_gamut_remap_select { + DCN2_GAMUT_REMAP_BYPASS = 0, + DCN2_GAMUT_REMAP_COEF_A = 1, + DCN2_GAMUT_REMAP_COEF_B = 2 +}; + +void dpp20_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s); + +void dpp2_set_degamma_pwl( + struct dpp *dpp_base, + const struct pwl_params *params); + +void dpp2_set_degamma( + struct dpp *dpp_base, + enum ipp_degamma_mode mode); + +void dpp2_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust); + +void dpp2_program_input_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn20_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry); + +bool dpp20_program_blnd_lut( + struct dpp *dpp_base, const struct pwl_params *params); + +bool dpp20_program_shaper( + struct dpp *dpp_base, + const struct pwl_params *params); + +bool dpp20_program_3dlut( + struct dpp *dpp_base, + struct tetrahedral_params *params); + +void dpp2_cnv_set_alpha_keyer( + struct dpp *dpp_base, + struct cnv_color_keyer_params *color_keyer); + +void dscl2_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum 
lb_memory_config lb_config, + int *num_part_y, + int *num_part_c); + +void dpp2_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes); + +void dpp2_dummy_program_input_lut( + struct dpp *dpp_base, + const struct dc_gamma *gamma); + +void oppn20_dummy_program_regamma_pwl( + struct dpp *dpp, + const struct pwl_params *params, + enum opp_regamma mode); + +void dpp2_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier); + +bool dpp2_construct(struct dcn20_dpp *dpp2, + struct dc_context *ctx, + uint32_t inst, + const struct dcn2_dpp_registers *tf_regs, + const struct dcn2_dpp_shift *tf_shift, + const struct dcn2_dpp_mask *tf_mask); + +void dpp2_power_on_obuf( + struct dpp *dpp_base, + bool power_on); + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); +#endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c new file mode 100644 index 000000000000..131a8de8e0f8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c @@ -0,0 +1,1202 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn20/dcn20_dpp.h" +#include "basics/conversion.h" + +#include "dcn10/dcn10_cm_common.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define IND_REG(index) \ + (index) + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + + +static void dpp2_enable_cm_block( + struct dpp *dpp_base) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + unsigned int cm_bypass_mode = 0; + //Temp, put CM in bypass mode + if (dpp_base->ctx->dc->debug.cm_in_bypass) + cm_bypass_mode = 1; + + REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); +} + + +static bool dpp2_degamma_ram_inuse( + struct dpp *dpp_base, + bool *ram_a_inuse) +{ + bool ret = false; + uint32_t status_reg = 0; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, + &status_reg); + + if (status_reg == 3) { + *ram_a_inuse = true; + ret = true; + } else if (status_reg == 4) { + *ram_a_inuse = false; + ret = true; + } + return ret; +} + +static void dpp2_program_degamma_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num, + bool is_ram_a) +{ + uint32_t i; + + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, + CM_DGAM_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL, + is_ram_a == true ? 0:1); + + REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0); + for (i = 0 ; i < num; i++) { + REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_DGAM_LUT_DATA, 0, + CM_DGAM_LUT_DATA, rgb[i].delta_red_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, + CM_DGAM_LUT_DATA, rgb[i].delta_green_reg); + REG_SET(CM_DGAM_LUT_DATA, 0, + CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg); + + } + +} + +void dpp2_set_degamma_pwl( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + bool is_ram_a = true; + + dpp1_power_on_degamma_lut(dpp_base, true); + dpp2_enable_cm_block(dpp_base); + dpp2_degamma_ram_inuse(dpp_base, &is_ram_a); + if (is_ram_a == true) + dpp1_program_degamma_lutb_settings(dpp_base, params); + else + dpp1_program_degamma_luta_settings(dpp_base, params); + + dpp2_program_degamma_lut(dpp_base, params->rgb_resulted, params->hw_points_num, !is_ram_a); + dpp1_degamma_ram_select(dpp_base, !is_ram_a); +} + +void dpp2_set_degamma( + struct dpp *dpp_base, + enum ipp_degamma_mode mode) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + dpp2_enable_cm_block(dpp_base); + + switch (mode) { + case IPP_DEGAMMA_MODE_BYPASS: + /* Setting de gamma bypass for now */ + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0); + break; + case IPP_DEGAMMA_MODE_HW_sRGB: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1); + break; + case IPP_DEGAMMA_MODE_HW_xvYCC: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2); + break; + case IPP_DEGAMMA_MODE_USER_PWL: + REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3); + break; + default: + BREAK_TO_DEBUGGER(); + break; + } +} + +static void program_gamut_remap( + struct dcn20_dpp *dpp, + const uint16_t *regval, + enum dcn20_gamut_remap_select select) +{ + uint32_t cur_select = 0; + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == DCN2_GAMUT_REMAP_BYPASS) { + REG_SET(CM_GAMUT_REMAP_CONTROL, 0, + 
CM_GAMUT_REMAP_MODE, 0); + return; + } + + /* determine which gamut_remap coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the update so gamut_remap is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &cur_select); + + /* value stored in dbg reg will be 1 greater than mode we want */ + if (cur_select != DCN2_GAMUT_REMAP_COEF_A) + select = DCN2_GAMUT_REMAP_COEF_A; + else + select = DCN2_GAMUT_REMAP_COEF_B; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + } else { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET( + CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, select); + +} + +void dpp2_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i = 0; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + /* Bypass if type is bypass or hw */ + program_gamut_remap(dpp, NULL, DCN2_GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + program_gamut_remap(dpp, arr_reg_val, DCN2_GAMUT_REMAP_COEF_A); + } +} + +static void read_gamut_remap(struct dcn20_dpp *dpp, + uint16_t *regval, + enum dcn20_gamut_remap_select *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_GAMUT_REMAP_MODE, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == DCN2_GAMUT_REMAP_COEF_A) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == DCN2_GAMUT_REMAP_COEF_B) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp2_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + uint16_t arr_reg_val[12]; + enum dcn20_gamut_remap_select select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == DCN2_GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} + +void dpp2_program_input_csc( + struct dpp *dpp_base, + enum 
dc_color_space color_space, + enum dcn20_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn20_input_csc_select select; + struct color_matrices_reg icsc_regs; + + if (input_select == DCN2_ICSC_SELECT_BYPASS) { + REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC coefficients (A or B) we are using + * currently. select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + IX_REG_GET(CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_STATUS_IDX, + CM_TEST_DEBUG_DATA_ICSC_MODE, &cur_select); + + if (cur_select != DCN2_ICSC_SELECT_ICSC_A) + select = DCN2_ICSC_SELECT_ICSC_A; + else + select = DCN2_ICSC_SELECT_ICSC_B; + + icsc_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; + icsc_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; + icsc_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; + icsc_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; + + if (select == DCN2_ICSC_SELECT_ICSC_A) { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); + + } else { + + icsc_regs.csc_c11_c12 = REG(CM_ICSC_B_C11_C12); + icsc_regs.csc_c33_c34 = REG(CM_ICSC_B_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &icsc_regs); + + REG_SET(CM_ICSC_CONTROL, 0, + CM_ICSC_MODE, select); +} + +static void dpp20_power_on_blnd_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_SET(CM_MEM_PWR_CTRL, 0, + BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0:1); + +} + +static void dpp20_configure_blnd_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_BLNDGAM_LUT_WRITE_EN_MASK, + CM_BLNDGAM_LUT_WRITE_SEL, is_ram_a == true ? 
0:1); + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); +} + +static void dpp20_program_blnd_pwl( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0 ; i < num; i++) { + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_BLNDGAM_LUT_DATA, 0, + CM_BLNDGAM_LUT_DATA, rgb[i].delta_red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, + CM_BLNDGAM_LUT_DATA, rgb[i].delta_green_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, + CM_BLNDGAM_LUT_DATA, rgb[i].delta_blue_reg); + + } + +} + +static void dcn20_dpp_cm_get_reg_field( + struct dcn20_dpp *dpp, + struct xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; +} + +/*program blnd lut RAM A*/ +static void dpp20_program_blnd_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = 
REG(CM_BLNDGAM_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +/*program blnd lut RAM B*/ +static void dpp20_program_blnd_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + struct xfer_func_reg gam_regs; + + dcn20_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); + + cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +static enum dc_lut_mode dpp20_get_blndgam_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +bool dpp20_program_blnd_lut( + struct dpp *dpp_base, const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, 0); + return false; + } + current_mode = dpp20_get_blndgam_current(dpp_base); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp20_power_on_blnd_lut(dpp_base, true); + dpp20_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp20_program_blnd_luta_settings(dpp_base, params); + else + dpp20_program_blnd_lutb_settings(dpp_base, params); + + dpp20_program_blnd_pwl( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_LUT_MODE, + next_mode == LUT_RAM_A ? 
1:2); + + return true; +} + + +static void dpp20_program_shaper_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i, red, green, blue; + uint32_t red_delta, green_delta, blue_delta; + uint32_t red_value, green_value, blue_value; + + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0 ; i < num; i++) { + + red = rgb[i].red_reg; + green = rgb[i].green_reg; + blue = rgb[i].blue_reg; + + red_delta = rgb[i].delta_red_reg; + green_delta = rgb[i].delta_green_reg; + blue_delta = rgb[i].delta_blue_reg; + + red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); + green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); + blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); + + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); + } + +} + +static enum dc_lut_mode dpp20_get_shaper_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET(CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static void dpp20_configure_shaper_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); + REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); +} + +/*program shaper RAM A*/ + +static void dpp20_program_shaper_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, + CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + 
REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, + CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, + CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, + CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, + CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, + CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, + CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, + CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, + CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, + CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, + CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, + CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, + CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, + 
CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, + CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, + CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, + CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, + CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); +} + +/*program shaper RAM B*/ +static void dpp20_program_shaper_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, + CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, + CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, + 
CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, + CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, + CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, + CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, + CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, + CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, + CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, + CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, + CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, + CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, + CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, + CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + 
CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, + CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, + CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, + CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); + +} + + +bool dpp20_program_shaper( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); + return false; + } + current_mode = dpp20_get_shaper_current(dpp_base); + + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp20_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp20_program_shaper_luta_settings(dpp_base, params); + else + dpp20_program_shaper_lutb_settings(dpp_base, params); + + dpp20_program_shaper_lut( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); + + return true; + +} + +static enum dc_lut_mode get3dlut_config( + struct dpp *dpp_base, + bool *is_17x17x17, + bool *is_12bits_color_channel) +{ + uint32_t i_mode, i_enable_10bits, lut_size; + enum dc_lut_mode mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_GET_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_CONFIG_STATUS, &i_mode, + CM_3DLUT_30BIT_EN, &i_enable_10bits); + + switch (i_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + if (i_enable_10bits > 0) + *is_12bits_color_channel = false; + else + *is_12bits_color_channel = true; + + REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); + + if (lut_size == 0) + *is_17x17x17 = true; + else + *is_17x17x17 = false; + + return mode; +} +/* + * select ramA or ramB, or bypass + * select color channel size 10 or 12 bits + * select 3dlut size 17x17x17 or 9x9x9 + */ +static void dpp20_set_3dlut_mode( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17) +{ + uint32_t lut_mode; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (mode == LUT_BYPASS) + lut_mode = 0; + else if (mode == LUT_RAM_A) + lut_mode = 1; + else + lut_mode = 2; + + REG_UPDATE_2(CM_3DLUT_MODE, + CM_3DLUT_MODE, lut_mode, + CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); +} + +static void dpp20_select_3dlut_ram( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, + CM_3DLUT_30BIT_EN, + is_color_channel_12bits == true ? 0:1); +} + + + +static void dpp20_set3dlut_ram12( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, red1, green1, blue1; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0 ; i < entries; i += 2) { + red = lut[i].red<<4; + green = lut[i].green<<4; + blue = lut[i].blue<<4; + red1 = lut[i+1].red<<4; + green1 = lut[i+1].green<<4; + blue1 = lut[i+1].blue<<4; + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, red, + CM_3DLUT_DATA1, red1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, green, + CM_3DLUT_DATA1, green1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, blue, + CM_3DLUT_DATA1, blue1); + + } +} + +/* + * load selected lut with 10 bits color channels + */ +static void dpp20_set3dlut_ram10( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, value; + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + for (i = 0; i < entries; i++) { + red = lut[i].red; + green = lut[i].green; + blue = lut[i].blue; + + value = (red<<20) | (green<<10) | blue; + + REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); + } + +} + + +static void dpp20_select_3dlut_ram_mask( + struct dpp *dpp_base, + uint32_t ram_selection_mask) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, + ram_selection_mask); + REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); +} + +bool dpp20_program_3dlut( + struct dpp *dpp_base, + struct tetrahedral_params *params) +{ + enum dc_lut_mode mode; + bool is_17x17x17; + bool is_12bits_color_channel; + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_size0; + int lut_size; + + if (params == NULL) { + dpp20_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); + return false; + } + mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); + + if (mode == LUT_BYPASS || mode == LUT_RAM_B) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + + is_17x17x17 = !params->use_tetrahedral_9; + is_12bits_color_channel = params->use_12bits; + if (is_17x17x17) { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + lut_size0 = sizeof(params->tetrahedral_17.lut0)/ + sizeof(params->tetrahedral_17.lut0[0]); + lut_size = sizeof(params->tetrahedral_17.lut1)/ + sizeof(params->tetrahedral_17.lut1[0]); + } else { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + lut_size0 = sizeof(params->tetrahedral_9.lut0)/ + sizeof(params->tetrahedral_9.lut0[0]); + lut_size = sizeof(params->tetrahedral_9.lut1)/ + sizeof(params->tetrahedral_9.lut1[0]); + } + + dpp20_select_3dlut_ram(dpp_base, mode, + is_12bits_color_channel); + dpp20_select_3dlut_ram_mask(dpp_base, 0x1); + if (is_12bits_color_channel) + dpp20_set3dlut_ram12(dpp_base, lut0, lut_size0); + else + dpp20_set3dlut_ram10(dpp_base, lut0, lut_size0); + + dpp20_select_3dlut_ram_mask(dpp_base, 0x2); + if (is_12bits_color_channel) + dpp20_set3dlut_ram12(dpp_base, lut1, lut_size); + else 
+ dpp20_set3dlut_ram10(dpp_base, lut1, lut_size); + + dpp20_select_3dlut_ram_mask(dpp_base, 0x4); + if (is_12bits_color_channel) + dpp20_set3dlut_ram12(dpp_base, lut2, lut_size); + else + dpp20_set3dlut_ram10(dpp_base, lut2, lut_size); + + dpp20_select_3dlut_ram_mask(dpp_base, 0x8); + if (is_12bits_color_channel) + dpp20_set3dlut_ram12(dpp_base, lut3, lut_size); + else + dpp20_set3dlut_ram10(dpp_base, lut3, lut_size); + + + dpp20_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, + is_17x17x17); + + return true; +} + +void dpp2_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier) +{ + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt new file mode 100644 index 000000000000..7711cd3c47a7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn201_dpp.c + dcn201_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c new file mode 100644 index 000000000000..345202fee40f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c @@ -0,0 +1,313 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" + +#include "core_types.h" + +#include "reg_helper.h" +#include "dcn201/dcn201_dpp.h" +#include "basics/conversion.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +static void dpp201_cnv_setup( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base); + uint32_t pixel_format = 0; + uint32_t alpha_en = 1; + enum dc_color_space color_space = COLOR_SPACE_SRGB; + enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; + bool force_disable_cursor = false; + uint32_t is_2bit = 0; + + REG_SET_2(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode); + + REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); + REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + pixel_format = 10; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + pixel_format = 22; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + pixel_format = 12; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + pixel_format = 112; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + pixel_format = 113; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + pixel_format = 114; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: + pixel_format = 115; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + pixel_format = 118; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + pixel_format = 119; + alpha_en = 0; + break; + default: + break; + } + + /* Set default color space based on format if none is given. 
*/ + color_space = input_color_space ? input_color_space : color_space; + + if (is_2bit == 1 && alpha_2bit_lut != NULL) { + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); + } + + REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + dpp1_program_input_csc(dpp_base, color_space, select, NULL); + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + } + dpp2_power_on_obuf(dpp_base, true); +} + +#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) + +static bool dpp201_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps) +{ + if (scl_data->viewport.width != scl_data->h_active && + scl_data->viewport.height != scl_data->v_active && + dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->format == PIXEL_FORMAT_FP16) + return false; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + + if (scl_data->ratios.horz.value == (8ll << 32)) + scl_data->ratios.horz.value--; + if (scl_data->ratios.vert.value == (8ll << 32)) + scl_data->ratios.vert.value--; + if (scl_data->ratios.horz_c.value == (8ll << 32)) + scl_data->ratios.horz_c.value--; + if (scl_data->ratios.vert_c.value == (8ll << 32)) + scl_data->ratios.vert_c.value--; + + if (in_taps->h_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz) > 4) + scl_data->taps.h_taps = 8; + else + scl_data->taps.h_taps = 4; + } else + scl_data->taps.h_taps = in_taps->h_taps; + + if (in_taps->v_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert) > 4) + scl_data->taps.v_taps = 8; + else + scl_data->taps.v_taps = 4; + } else + scl_data->taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4) + scl_data->taps.v_taps_c = 4; + else + scl_data->taps.v_taps_c = 2; + } else + scl_data->taps.v_taps_c = in_taps->v_taps_c; + if (in_taps->h_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4) + scl_data->taps.h_taps_c = 4; + else + scl_data->taps.h_taps_c = 2; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; + else + scl_data->taps.h_taps_c = in_taps->h_taps_c; + + if (!dpp->ctx->dc->debug.always_scale) { + if (IDENTITY_RATIO(scl_data->ratios.horz)) + scl_data->taps.h_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert)) + scl_data->taps.v_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; + } + + return true; +} + +static struct dpp_funcs dcn201_dpp_funcs = { + .dpp_read_state = dpp20_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl, + .dpp_set_degamma = dpp2_set_degamma, + 
.dpp_program_input_lut = dpp2_dummy_program_input_lut, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp201_cnv_setup, + .dpp_program_degamma_pwl = dpp2_set_degamma_pwl, + .dpp_program_blnd_lut = dpp20_program_blnd_lut, + .dpp_program_shaper_lut = dpp20_program_shaper, + .dpp_program_3dlut = dpp20_program_3dlut, + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp2_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp2_cm_get_gamut_remap, +}; + +static struct dpp_caps dcn201_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, +}; + +bool dpp201_construct( + struct dcn201_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_dpp_registers *tf_regs, + const struct dcn201_dpp_shift *tf_shift, + const struct dcn201_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn201_dpp_funcs; + dpp->base.caps = &dcn201_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + dpp->lb_pixel_depth_supported = + LB_PIXEL_DEPTH_18BPP | + LB_PIXEL_DEPTH_24BPP | + LB_PIXEL_DEPTH_30BPP; + + dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY; + dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h new file mode 100644 index 000000000000..cbd5b47b4acf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h @@ -0,0 +1,83 @@ +/* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN201_DPP_H__ +#define __DCN201_DPP_H__ + +#include "dcn20/dcn20_dpp.h" + +#define TO_DCN201_DPP(dpp)\ + container_of(dpp, struct dcn201_dpp, base) + +#define TF_REG_LIST_DCN201(id) \ + TF_REG_LIST_DCN20(id) + +#define TF_REG_LIST_SH_MASK_DCN201(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN20(mask_sh) + +#define TF_REG_FIELD_LIST_DCN201(type) \ + TF_REG_FIELD_LIST_DCN2_0(type) + +struct dcn201_dpp_shift { + TF_REG_FIELD_LIST_DCN201(uint8_t); +}; + +struct dcn201_dpp_mask { + TF_REG_FIELD_LIST_DCN201(uint32_t); +}; + +#define DPP_DCN201_REG_VARIABLE_LIST \ + DPP_DCN2_REG_VARIABLE_LIST + +struct dcn201_dpp_registers { + DPP_DCN201_REG_VARIABLE_LIST; +}; + +struct dcn201_dpp { + struct dpp base; + + const struct dcn201_dpp_registers *tf_regs; + const struct dcn201_dpp_shift *tf_shift; + const struct dcn201_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +bool dpp201_construct(struct dcn201_dpp *dpp2, + struct dc_context *ctx, + uint32_t inst, + const struct dcn201_dpp_registers *tf_regs, + const struct dcn201_dpp_shift *tf_shift, + const struct dcn201_dpp_mask *tf_mask); + +#endif /* __DC_HWSS_DCN201_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt new file mode 100644 index 000000000000..0faee2a1e32b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/CMakeLists.txt @@ -0,0 +1,5 @@ +dal3_subdirectory_sources( + dcn30_dpp.c + dcn30_dpp_cm.c + dcn30_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c new file mode 100644 index 000000000000..0c31b12cacaf --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -0,0 +1,1527 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "core_types.h" +#include "reg_helper.h" +#include "dcn30/dcn30_dpp.h" +#include "basics/conversion.h" +#include "dcn30/dcn30_cm_common.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + + +void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t gamcor_lut_mode, rgam_lut_mode; + + REG_GET(DPP_CONTROL, + DPP_CLOCK_ENABLE, &s->is_enabled); + + // Pre-degamma (ROM) + REG_GET_2(PRE_DEGAM, + PRE_DEGAM_MODE, &s->pre_dgam_mode, + PRE_DEGAM_SELECT, &s->pre_dgam_select); + + // Gamma Correction (RAM) + REG_GET(CM_GAMCOR_CONTROL, + CM_GAMCOR_MODE_CURRENT, &s->gamcor_mode); + if (s->gamcor_mode) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &gamcor_lut_mode); + if (!gamcor_lut_mode) + s->gamcor_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } + + // Shaper LUT (RAM), 3D LUT (mode, bit-depth, size) + REG_GET(CM_SHAPER_CONTROL, + CM_SHAPER_LUT_MODE, &s->shaper_lut_mode); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_MODE_CURRENT, &s->lut3d_mode); + REG_GET(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_30BIT_EN, &s->lut3d_bit_depth); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_SIZE, &s->lut3d_size); + + // Blend/Out Gamma (RAM) + REG_GET(CM_BLNDGAM_CONTROL, + CM_BLNDGAM_MODE_CURRENT, &s->rgam_lut_mode); + if (s->rgam_lut_mode){ + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &rgam_lut_mode); + if (!rgam_lut_mode) + s->rgam_lut_mode = LUT_RAM_A; // Otherwise, LUT_RAM_B + } +} + +/*program post scaler scs block in dpp CM*/ +void dpp3_program_post_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int i; + int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix); + const uint16_t *regval = NULL; + uint32_t cur_select = 0; + enum dcn10_input_csc_select select; + struct color_matrices_reg gam_regs; + + if (input_select == INPUT_CSC_SELECT_BYPASS) { + REG_SET(CM_POST_CSC_CONTROL, 0, CM_POST_CSC_MODE, 0); + return; + } + + if (tbl_entry == NULL) { + for (i = 0; i < arr_size; i++) + if (dpp_input_csc_matrix[i].color_space == color_space) { + regval = dpp_input_csc_matrix[i].regval; + break; + } + + if (regval == NULL) { + BREAK_TO_DEBUGGER(); + return; + } + } else { + regval = tbl_entry->regval; + } + + /* determine which CSC matrix (icsc or coma) we are using + * currently. 
select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_GET(CM_POST_CSC_CONTROL, + CM_POST_CSC_MODE_CURRENT, &cur_select); + + if (cur_select != INPUT_CSC_SELECT_ICSC) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_COMA; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_POST_CSC_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_POST_CSC_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_POST_CSC_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_POST_CSC_C12; + + if (select == INPUT_CSC_SELECT_ICSC) { + + gam_regs.csc_c11_c12 = REG(CM_POST_CSC_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_POST_CSC_C33_C34); + + } else { + + gam_regs.csc_c11_c12 = REG(CM_POST_CSC_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_POST_CSC_B_C33_C34); + + } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_POST_CSC_CONTROL, 0, + CM_POST_CSC_MODE, select); +} + + +/*CNVC degam unit has read only LUTs*/ +void dpp3_set_pre_degam(struct dpp *dpp_base, enum dc_transfer_func_predefined tr) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int pre_degam_en = 1; + int degamma_lut_selection = 0; + + switch (tr) { + case TRANSFER_FUNCTION_LINEAR: + case TRANSFER_FUNCTION_UNITY: + pre_degam_en = 0; //bypass + break; + case TRANSFER_FUNCTION_SRGB: + degamma_lut_selection = 0; + break; + case TRANSFER_FUNCTION_BT709: + degamma_lut_selection = 4; + break; + case TRANSFER_FUNCTION_PQ: + degamma_lut_selection = 5; + break; + case TRANSFER_FUNCTION_HLG: + degamma_lut_selection = 6; + break; + case TRANSFER_FUNCTION_GAMMA22: + degamma_lut_selection = 1; + break; + case TRANSFER_FUNCTION_GAMMA24: + degamma_lut_selection = 2; + break; + case TRANSFER_FUNCTION_GAMMA26: + degamma_lut_selection = 3; + break; + default: + pre_degam_en = 0; + break; + } + + REG_SET_2(PRE_DEGAM, 0, + PRE_DEGAM_MODE, pre_degam_en, + PRE_DEGAM_SELECT, degamma_lut_selection); +} + +void dpp3_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t pixel_format = 0; + uint32_t alpha_en = 1; + enum dc_color_space color_space = COLOR_SPACE_SRGB; + enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS; + bool force_disable_cursor = false; + uint32_t is_2bit = 0; + uint32_t alpha_plane_enable = 0; + uint32_t dealpha_en = 0, dealpha_ablnd_en = 0; + uint32_t realpha_en = 0, realpha_ablnd_en = 0; + uint32_t program_prealpha_dealpha = 0; + struct out_csc_color_matrix tbl_entry; + int i; + + REG_SET_2(FORMAT_CONTROL, 0, + CNVC_BYPASS, 0, + FORMAT_EXPANSION_MODE, mode); + + REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0); + REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0); + REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0); + + REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_R, 0); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_G, 1); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CROSSBAR_B, 2); + + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + pixel_format = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + pixel_format = 3; + alpha_en = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + pixel_format = 8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + 
pixel_format = 10; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + force_disable_cursor = false; + pixel_format = 65; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + force_disable_cursor = true; + pixel_format = 64; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + force_disable_cursor = true; + pixel_format = 67; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + force_disable_cursor = true; + pixel_format = 66; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + pixel_format = 26; /* ARGB16161616_UNORM */ + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + pixel_format = 24; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + pixel_format = 25; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888: + pixel_format = 12; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: + pixel_format = 112; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: + pixel_format = 113; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: + pixel_format = 114; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102: + pixel_format = 115; + color_space = COLOR_SPACE_YCBCR709; + select = INPUT_CSC_SELECT_ICSC; + is_2bit = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE: + pixel_format = 116; + alpha_plane_enable = 0; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + pixel_format = 116; + alpha_plane_enable = 1; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: + pixel_format = 118; + break; + case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: + pixel_format = 119; + break; + default: + break; + } + + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + + if (is_2bit == 1 && alpha_2bit_lut != NULL) { + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2); + REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3); + } + + REG_SET_2(CNVC_SURFACE_PIXEL_FORMAT, 0, + CNVC_SURFACE_PIXEL_FORMAT, pixel_format, + CNVC_ALPHA_PLANE_ENABLE, alpha_plane_enable); + REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en); + + if (program_prealpha_dealpha) { + dealpha_en = 1; + realpha_en = 1; + } + REG_SET_2(PRE_DEALPHA, 0, + PRE_DEALPHA_EN, dealpha_en, + PRE_DEALPHA_ABLND_EN, dealpha_ablnd_en); + REG_SET_2(PRE_REALPHA, 0, + PRE_REALPHA_EN, realpha_en, + PRE_REALPHA_ABLND_EN, realpha_ablnd_en); + + /* If input adjustment exists, program the ICSC with those values. 
*/ + if (input_csc_color_matrix.enable_adjustment == true) { + for (i = 0; i < 12; i++) + tbl_entry.regval[i] = input_csc_color_matrix.matrix[i]; + + tbl_entry.color_space = input_color_space; + + if (color_space >= COLOR_SPACE_YCBCR601) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_BYPASS; + + dpp3_program_post_csc(dpp_base, color_space, select, + &tbl_entry); + } else { + dpp3_program_post_csc(dpp_base, color_space, select, NULL); + } + + if (force_disable_cursor) { + REG_UPDATE(CURSOR_CONTROL, + CURSOR_ENABLE, 0); + REG_UPDATE(CURSOR0_CONTROL, + CUR0_ENABLE, 0); + } +} + +#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19)) + +void dpp3_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes) +{ + enum dc_cursor_color_format color_format = cursor_attributes->color_format; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int cur_rom_en = 0; + + if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || + color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } + } + + REG_UPDATE_3(CURSOR0_CONTROL, + CUR0_MODE, color_format, + CUR0_EXPANSION_MODE, 0, + CUR0_ROM_EN, cur_rom_en); + + if (color_format == CURSOR_MODE_MONO) { + /* todo: clarify what to program these to */ + REG_UPDATE(CURSOR0_COLOR0, + CUR0_COLOR0, 0x00000000); + REG_UPDATE(CURSOR0_COLOR1, + CUR0_COLOR1, 0xFFFFFFFF); + } + + dpp_base->att.cur0_ctl.bits.expansion_mode = 0; + dpp_base->att.cur0_ctl.bits.cur0_rom_en = cur_rom_en; + dpp_base->att.cur0_ctl.bits.mode = color_format; +} + + +bool dpp3_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps) +{ + int num_part_y, num_part_c; + int max_taps_y, max_taps_c; + int min_taps_y, min_taps_c; + enum lb_memory_config lb_config; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + + /* + * Set default taps if none are provided + * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling + * taps = 4 for upscaling + */ + if (in_taps->h_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz) > 1) + scl_data->taps.h_taps = min(2 * dc_fixpt_ceil(scl_data->ratios.horz), 8); + else + scl_data->taps.h_taps = 4; + } else + scl_data->taps.h_taps = in_taps->h_taps; + if (in_taps->v_taps == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert) > 1) + scl_data->taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert, 2)), 8); + else + scl_data->taps.v_taps = 4; + } else + scl_data->taps.v_taps = in_taps->v_taps; + if (in_taps->v_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 1) + scl_data->taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(scl_data->ratios.vert_c, 2)), 8); + else + scl_data->taps.v_taps_c = 4; + } else + scl_data->taps.v_taps_c = in_taps->v_taps_c; + if (in_taps->h_taps_c == 0) { + if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 1) + scl_data->taps.h_taps_c = min(2 * dc_fixpt_ceil(scl_data->ratios.horz_c), 8); + else + scl_data->taps.h_taps_c = 4; + } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1) + /* Only 1 and even h_taps_c are supported by hw */ + scl_data->taps.h_taps_c = in_taps->h_taps_c - 1; + else + scl_data->taps.h_taps_c = in_taps->h_taps_c; + + /*Ensure we can support the requested number of vtaps*/ + min_taps_y = 
dc_fixpt_ceil(scl_data->ratios.vert); + min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c); + + /* Use LB_MEMORY_CONFIG_3 for 4:2:0 */ + if ((scl_data->format == PIXEL_FORMAT_420BPP8) || (scl_data->format == PIXEL_FORMAT_420BPP10)) + lb_config = LB_MEMORY_CONFIG_3; + else + lb_config = LB_MEMORY_CONFIG_0; + + dpp->caps->dscl_calc_lb_num_partitions( + scl_data, lb_config, &num_part_y, &num_part_c); + + /* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */ + if (dc_fixpt_ceil(scl_data->ratios.vert) > 2) + max_taps_y = num_part_y - (dc_fixpt_ceil(scl_data->ratios.vert) - 2); + else + max_taps_y = num_part_y; + + if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 2) + max_taps_c = num_part_c - (dc_fixpt_ceil(scl_data->ratios.vert_c) - 2); + else + max_taps_c = num_part_c; + + if (max_taps_y < min_taps_y) + return false; + else if (max_taps_c < min_taps_c) + return false; + + if (scl_data->taps.v_taps > max_taps_y) + scl_data->taps.v_taps = max_taps_y; + + if (scl_data->taps.v_taps_c > max_taps_c) + scl_data->taps.v_taps_c = max_taps_c; + + if (!dpp->ctx->dc->debug.always_scale) { + if (IDENTITY_RATIO(scl_data->ratios.horz)) + scl_data->taps.h_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert)) + scl_data->taps.v_taps = 1; + if (IDENTITY_RATIO(scl_data->ratios.horz_c)) + scl_data->taps.h_taps_c = 1; + if (IDENTITY_RATIO(scl_data->ratios.vert_c)) + scl_data->taps.v_taps_c = 1; + } + + return true; +} + +static void dpp3_deferred_update(struct dpp *dpp_base) +{ + int bypass_state; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->deferred_reg_writes.bits.disable_dscl) { + REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3); + dpp_base->deferred_reg_writes.bits.disable_dscl = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_gamcor) { + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_gamcor = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) { + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_3dlut) { + REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_3dlut = false; + } + + if (dpp_base->deferred_reg_writes.bits.disable_shaper) { + REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state); + if (bypass_state == 0) { // only program if bypass was latched + REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3); + } else + ASSERT(0); // LUT select was updated again before vupdate + dpp_base->deferred_reg_writes.bits.disable_shaper = false; + } +} + +static void dpp3_power_on_blnd_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 
0); + REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true; + } + } else { + REG_SET(CM_MEM_PWR_CTRL, 0, + BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1); + } +} + +static void dpp3_power_on_hdr3dlut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0); + REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_3dlut = true; + } + } +} + +static void dpp3_power_on_shaper( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0); + REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_shaper = true; + } + } +} + +static void dpp3_configure_blnd_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE_2(CM_BLNDGAM_LUT_CONTROL, + CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 7, + CM_BLNDGAM_LUT_HOST_SEL, is_ram_a == true ? 0 : 1); + + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); +} + +static void dpp3_program_blnd_pwl( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; + uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; + uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; + + if (is_rgb_equal(rgb, num)) { + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); + } else { + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); + REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4); + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red); + + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); + REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2); + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green); + + REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0); + REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1); + for (i = 0 ; i < num; i++) + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg); + REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_blue); + } +} + +static void dcn3_dpp_cm_get_reg_field( + struct dcn3_dpp *dpp, + struct dcn3_xfer_func_reg *reg) +{ + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + 
reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B; +} + +/*program blnd lut RAM A*/ +static void dpp3_program_blnd_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + struct dcn3_xfer_func_reg gam_regs; + + dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_BLNDGAM_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMA_REGION_32_33); + + cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +/*program blnd lut RAM B*/ +static void dpp3_program_blnd_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + struct dcn3_xfer_func_reg gam_regs; + + dcn3_dpp_cm_get_reg_field(dpp, &gam_regs); + + gam_regs.start_cntl_b = REG(CM_BLNDGAM_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_BLNDGAM_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_BLNDGAM_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = 
REG(CM_BLNDGAM_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_BLNDGAM_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_BLNDGAM_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_BLNDGAM_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_BLNDGAM_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_BLNDGAM_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_BLNDGAM_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_BLNDGAM_RAMB_REGION_32_33); + + cm_helper_program_gamcor_xfer_func(dpp->base.ctx, params, &gam_regs); +} + +static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t mode_current = 0; + uint32_t in_use = 0; + + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &mode_current); + REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, &in_use); + + switch (mode_current) { + case 0: + case 1: + mode = LUT_BYPASS; + break; + + case 2: + if (in_use == 0) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static bool dpp3_program_blnd_lut(struct dpp *dpp_base, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_BLNDGAM_CONTROL, 0, CM_BLNDGAM_MODE, 0); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_blnd_lut(dpp_base, false); + return false; + } + + current_mode = dpp3_get_blndgam_current(dpp_base); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_B) + next_mode = LUT_RAM_A; + else + next_mode = LUT_RAM_B; + + dpp3_power_on_blnd_lut(dpp_base, true); + dpp3_configure_blnd_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp3_program_blnd_luta_settings(dpp_base, params); + else + dpp3_program_blnd_lutb_settings(dpp_base, params); + + dpp3_program_blnd_pwl( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_UPDATE_2(CM_BLNDGAM_CONTROL, + CM_BLNDGAM_MODE, 2, + CM_BLNDGAM_SELECT, next_mode == LUT_RAM_A ? 
0 : 1); + + return true; +} + + +static void dpp3_program_shaper_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num) +{ + uint32_t i, red, green, blue; + uint32_t red_delta, green_delta, blue_delta; + uint32_t red_value, green_value, blue_value; + + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + for (i = 0 ; i < num; i++) { + + red = rgb[i].red_reg; + green = rgb[i].green_reg; + blue = rgb[i].blue_reg; + + red_delta = rgb[i].delta_red_reg; + green_delta = rgb[i].delta_green_reg; + blue_delta = rgb[i].delta_blue_reg; + + red_value = ((red_delta & 0x3ff) << 14) | (red & 0x3fff); + green_value = ((green_delta & 0x3ff) << 14) | (green & 0x3fff); + blue_value = ((blue_delta & 0x3ff) << 14) | (blue & 0x3fff); + + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, red_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, green_value); + REG_SET(CM_SHAPER_LUT_DATA, 0, CM_SHAPER_LUT_DATA, blue_value); + } + +} + +static enum dc_lut_mode dpp3_get_shaper_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode; + uint32_t state_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &state_mode); + + switch (state_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + + return mode; +} + +static void dpp3_configure_shaper_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_EN_MASK, 7); + REG_UPDATE(CM_SHAPER_LUT_WRITE_EN_MASK, + CM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1); + REG_SET(CM_SHAPER_LUT_INDEX, 0, CM_SHAPER_LUT_INDEX, 0); +} + +/*program shaper RAM A*/ + +static void dpp3_program_shaper_luta_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMA_START_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_B, 0, + CM_SHAPER_RAMA_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_G, 0, + CM_SHAPER_RAMA_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMA_END_CNTL_R, 0, + CM_SHAPER_RAMA_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMA_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMA_REGION_0_1, 0, + CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_2_3, 0, 
+ CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_4_5, 0, + CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_6_7, 0, + CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_8_9, 0, + CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_10_11, 0, + CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_12_13, 0, + CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_14_15, 0, + CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_16_17, 0, + CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_18_19, 0, + CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_20_21, 0, + CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_22_23, 0, + CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_24_25, 0, + CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + 
CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_26_27, 0, + CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_28_29, 0, + CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_30_31, 0, + CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMA_REGION_32_33, 0, + CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); +} + +/*program shaper RAM B*/ +static void dpp3_program_shaper_lutb_settings( + struct dpp *dpp_base, + const struct pwl_params *params) +{ + const struct gamma_curve *curve; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_START_B, params->corner_points[0].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_START_G, params->corner_points[0].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G, 0); + REG_SET_2(CM_SHAPER_RAMB_START_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_START_R, params->corner_points[0].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R, 0); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_B, 0, + CM_SHAPER_RAMB_EXP_REGION_END_B, params->corner_points[1].blue.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_B, params->corner_points[1].blue.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_G, 0, + CM_SHAPER_RAMB_EXP_REGION_END_G, params->corner_points[1].green.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_G, params->corner_points[1].green.custom_float_y); + + REG_SET_2(CM_SHAPER_RAMB_END_CNTL_R, 0, + CM_SHAPER_RAMB_EXP_REGION_END_R, params->corner_points[1].red.custom_float_x, + CM_SHAPER_RAMB_EXP_REGION_END_BASE_R, params->corner_points[1].red.custom_float_y); + + curve = params->arr_curve_points; + REG_SET_4(CM_SHAPER_RAMB_REGION_0_1, 0, + CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_2_3, 0, + CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_4_5, 0, + CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET, curve[0].offset, + 
CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_6_7, 0, + CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_8_9, 0, + CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_10_11, 0, + CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_12_13, 0, + CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_14_15, 0, + CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_16_17, 0, + CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_18_19, 0, + CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_20_21, 0, + CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_22_23, 0, + CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_24_25, 0, + CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_26_27, 0, + CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET, curve[1].offset, + 
CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_28_29, 0, + CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_30_31, 0, + CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS, curve[1].segments_num); + + curve += 2; + REG_SET_4(CM_SHAPER_RAMB_REGION_32_33, 0, + CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET, curve[0].offset, + CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS, curve[0].segments_num, + CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET, curve[1].offset, + CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS, curve[1].segments_num); + +} + + +static bool dpp3_program_shaper(struct dpp *dpp_base, + const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (params == NULL) { + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, 0); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_shaper(dpp_base, false); + return false; + } + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_shaper(dpp_base, true); + + current_mode = dpp3_get_shaper_current(dpp_base); + + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp3_configure_shaper_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_A) + dpp3_program_shaper_luta_settings(dpp_base, params); + else + dpp3_program_shaper_lutb_settings(dpp_base, params); + + dpp3_program_shaper_lut( + dpp_base, params->rgb_resulted, params->hw_points_num); + + REG_SET(CM_SHAPER_CONTROL, 0, CM_SHAPER_LUT_MODE, next_mode == LUT_RAM_A ? 1:2); + + return true; + +} + +static enum dc_lut_mode get3dlut_config( + struct dpp *dpp_base, + bool *is_17x17x17, + bool *is_12bits_color_channel) +{ + uint32_t i_mode, i_enable_10bits, lut_size; + enum dc_lut_mode mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_30BIT_EN, &i_enable_10bits); + REG_GET(CM_3DLUT_MODE, + CM_3DLUT_MODE_CURRENT, &i_mode); + + switch (i_mode) { + case 0: + mode = LUT_BYPASS; + break; + case 1: + mode = LUT_RAM_A; + break; + case 2: + mode = LUT_RAM_B; + break; + default: + mode = LUT_BYPASS; + break; + } + if (i_enable_10bits > 0) + *is_12bits_color_channel = false; + else + *is_12bits_color_channel = true; + + REG_GET(CM_3DLUT_MODE, CM_3DLUT_SIZE, &lut_size); + + if (lut_size == 0) + *is_17x17x17 = true; + else + *is_17x17x17 = false; + + return mode; +} +/* + * select ramA or ramB, or bypass + * select color channel size 10 or 12 bits + * select 3dlut size 17x17x17 or 9x9x9 + */ +static void dpp3_set_3dlut_mode( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17) +{ + uint32_t lut_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (mode == LUT_BYPASS) + lut_mode = 0; + else if (mode == LUT_RAM_A) + lut_mode = 1; + else + lut_mode = 2; + + REG_UPDATE_2(CM_3DLUT_MODE, + CM_3DLUT_MODE, lut_mode, + CM_3DLUT_SIZE, is_lut_size17x17x17 == true ? 
0 : 1); +} + +static void dpp3_select_3dlut_ram( + struct dpp *dpp_base, + enum dc_lut_mode mode, + bool is_color_channel_12bits) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE_2(CM_3DLUT_READ_WRITE_CONTROL, + CM_3DLUT_RAM_SEL, mode == LUT_RAM_A ? 0 : 1, + CM_3DLUT_30BIT_EN, + is_color_channel_12bits == true ? 0:1); +} + + + +static void dpp3_set3dlut_ram12( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, red1, green1, blue1; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + for (i = 0 ; i < entries; i += 2) { + red = lut[i].red<<4; + green = lut[i].green<<4; + blue = lut[i].blue<<4; + red1 = lut[i+1].red<<4; + green1 = lut[i+1].green<<4; + blue1 = lut[i+1].blue<<4; + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, red, + CM_3DLUT_DATA1, red1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, green, + CM_3DLUT_DATA1, green1); + + REG_SET_2(CM_3DLUT_DATA, 0, + CM_3DLUT_DATA0, blue, + CM_3DLUT_DATA1, blue1); + + } +} + +/* + * load selected lut with 10 bits color channels + */ +static void dpp3_set3dlut_ram10( + struct dpp *dpp_base, + const struct dc_rgb *lut, + uint32_t entries) +{ + uint32_t i, red, green, blue, value; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + for (i = 0; i < entries; i++) { + red = lut[i].red; + green = lut[i].green; + blue = lut[i].blue; + + value = (red<<20) | (green<<10) | blue; + + REG_SET(CM_3DLUT_DATA_30BIT, 0, CM_3DLUT_DATA_30BIT, value); + } + +} + + +static void dpp3_select_3dlut_ram_mask( + struct dpp *dpp_base, + uint32_t ram_selection_mask) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, + ram_selection_mask); + REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0); +} + +static bool dpp3_program_3dlut(struct dpp *dpp_base, + struct tetrahedral_params *params) +{ + enum dc_lut_mode mode; + bool is_17x17x17; + bool is_12bits_color_channel; + struct dc_rgb *lut0; + struct dc_rgb *lut1; + struct dc_rgb *lut2; + struct dc_rgb *lut3; + int lut_size0; + int lut_size; + + if (params == NULL) { + dpp3_set_3dlut_mode(dpp_base, LUT_BYPASS, false, false); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_hdr3dlut(dpp_base, false); + return false; + } + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_hdr3dlut(dpp_base, true); + + mode = get3dlut_config(dpp_base, &is_17x17x17, &is_12bits_color_channel); + + if (mode == LUT_BYPASS || mode == LUT_RAM_B) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + + is_17x17x17 = !params->use_tetrahedral_9; + is_12bits_color_channel = params->use_12bits; + if (is_17x17x17) { + lut0 = params->tetrahedral_17.lut0; + lut1 = params->tetrahedral_17.lut1; + lut2 = params->tetrahedral_17.lut2; + lut3 = params->tetrahedral_17.lut3; + lut_size0 = sizeof(params->tetrahedral_17.lut0)/ + sizeof(params->tetrahedral_17.lut0[0]); + lut_size = sizeof(params->tetrahedral_17.lut1)/ + sizeof(params->tetrahedral_17.lut1[0]); + } else { + lut0 = params->tetrahedral_9.lut0; + lut1 = params->tetrahedral_9.lut1; + lut2 = params->tetrahedral_9.lut2; + lut3 = params->tetrahedral_9.lut3; + lut_size0 = sizeof(params->tetrahedral_9.lut0)/ + sizeof(params->tetrahedral_9.lut0[0]); + lut_size = sizeof(params->tetrahedral_9.lut1)/ + sizeof(params->tetrahedral_9.lut1[0]); + } + + dpp3_select_3dlut_ram(dpp_base, mode, + is_12bits_color_channel); + dpp3_select_3dlut_ram_mask(dpp_base, 0x1); + if (is_12bits_color_channel) + dpp3_set3dlut_ram12(dpp_base, lut0, 
lut_size0); + else + dpp3_set3dlut_ram10(dpp_base, lut0, lut_size0); + + dpp3_select_3dlut_ram_mask(dpp_base, 0x2); + if (is_12bits_color_channel) + dpp3_set3dlut_ram12(dpp_base, lut1, lut_size); + else + dpp3_set3dlut_ram10(dpp_base, lut1, lut_size); + + dpp3_select_3dlut_ram_mask(dpp_base, 0x4); + if (is_12bits_color_channel) + dpp3_set3dlut_ram12(dpp_base, lut2, lut_size); + else + dpp3_set3dlut_ram10(dpp_base, lut2, lut_size); + + dpp3_select_3dlut_ram_mask(dpp_base, 0x8); + if (is_12bits_color_channel) + dpp3_set3dlut_ram12(dpp_base, lut3, lut_size); + else + dpp3_set3dlut_ram10(dpp_base, lut3, lut_size); + + + dpp3_set_3dlut_mode(dpp_base, mode, is_12bits_color_channel, + is_17x17x17); + + return true; +} +static struct dpp_funcs dcn30_dpp_funcs = { + .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, + .dpp_read_state = dpp30_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = NULL, + .dpp_set_pre_degam = dpp3_set_pre_degam, + .dpp_program_input_lut = NULL, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp3_cnv_setup, + .dpp_program_degamma_pwl = NULL, + .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, + .dpp_program_cm_bias = dpp3_program_cm_bias, + .dpp_program_blnd_lut = dpp3_program_blnd_lut, + .dpp_program_shaper_lut = dpp3_program_shaper, + .dpp_program_3dlut = dpp3_program_3dlut, + .dpp_deferred_update = dpp3_deferred_update, + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp3_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, +}; + + +static struct dpp_caps dcn30_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions, +}; + +bool dpp3_construct( + struct dcn3_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn30_dpp_funcs; + dpp->base.caps = &dcn30_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + return true; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h new file mode 100644 index 000000000000..2ac8045a87a1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -0,0 +1,642 @@ +/* Copyright 2020 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN30_DPP_H__ +#define __DCN30_DPP_H__ + +#include "dcn20/dcn20_dpp.h" + +#define TO_DCN30_DPP(dpp)\ + container_of(dpp, struct dcn3_dpp, base) + +#define DPP_REG_LIST_DCN30_COMMON(id)\ + SRI(CM_DEALPHA, CM, id),\ + SRI(CM_MEM_PWR_STATUS, CM, id),\ + SRI(CM_BIAS_CR_R, CM, id),\ + SRI(CM_BIAS_Y_G_CB_B, CM, id),\ + SRI(PRE_DEGAM, CNVC_CFG, id),\ + SRI(CM_GAMCOR_CONTROL, CM, id),\ + SRI(CM_GAMCOR_LUT_CONTROL, CM, id),\ + SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ + SRI(CM_GAMCOR_LUT_INDEX, CM, id),\ + SRI(CM_GAMCOR_LUT_DATA, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL1_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL2_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL1_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL2_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL1_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_END_CNTL2_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_REGION_0_1, CM, id),\ + SRI(CM_GAMCOR_RAMB_REGION_32_33, CM, id),\ + SRI(CM_GAMCOR_RAMB_OFFSET_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_OFFSET_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_OFFSET_R, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMB_START_BASE_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL1_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL2_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL1_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL2_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL1_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_END_CNTL2_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_REGION_0_1, CM, id),\ + SRI(CM_GAMCOR_RAMA_REGION_32_33, CM, id),\ + SRI(CM_GAMCOR_RAMA_OFFSET_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_OFFSET_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_OFFSET_R, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_G, CM, id),\ + SRI(CM_GAMCOR_RAMA_START_BASE_CNTL_R, CM, id),\ + SRI(CM_GAMUT_REMAP_CONTROL, CM, id),\ + 
SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C31_C32, CM, id),\ + SRI(CM_GAMUT_REMAP_B_C33_C34, CM, id),\ + SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ + SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ + SRI(OTG_H_BLANK, DSCL, id), \ + SRI(OTG_V_BLANK, DSCL, id), \ + SRI(SCL_MODE, DSCL, id), \ + SRI(LB_DATA_FORMAT, DSCL, id), \ + SRI(LB_MEMORY_CTRL, DSCL, id), \ + SRI(DSCL_AUTOCAL, DSCL, id), \ + SRI(DSCL_CONTROL, DSCL, id), \ + SRI(SCL_TAP_CONTROL, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_SELECT, DSCL, id), \ + SRI(SCL_COEF_RAM_TAP_DATA, DSCL, id), \ + SRI(DSCL_2TAP_CONTROL, DSCL, id), \ + SRI(MPC_SIZE, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO, DSCL, id), \ + SRI(SCL_HORZ_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_SCALE_RATIO_C, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT, DSCL, id), \ + SRI(SCL_HORZ_FILTER_INIT_C, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT, DSCL, id), \ + SRI(SCL_VERT_FILTER_INIT_C, DSCL, id), \ + SRI(RECOUT_START, DSCL, id), \ + SRI(RECOUT_SIZE, DSCL, id), \ + SRI(PRE_DEALPHA, CNVC_CFG, id), \ + SRI(PRE_REALPHA, CNVC_CFG, id), \ + SRI(PRE_CSC_MODE, CNVC_CFG, id), \ + SRI(PRE_CSC_C11_C12, CNVC_CFG, id), \ + SRI(PRE_CSC_C33_C34, CNVC_CFG, id), \ + SRI(PRE_CSC_B_C11_C12, CNVC_CFG, id), \ + SRI(PRE_CSC_B_C33_C34, CNVC_CFG, id), \ + SRI(CM_POST_CSC_CONTROL, CM, id), \ + SRI(CM_POST_CSC_C11_C12, CM, id), \ + SRI(CM_POST_CSC_C33_C34, CM, id), \ + SRI(CM_POST_CSC_B_C11_C12, CM, id), \ + SRI(CM_POST_CSC_B_C33_C34, CM, id), \ + SRI(CM_MEM_PWR_CTRL, CM, id), \ + SRI(CM_CONTROL, CM, id), \ + SRI(FORMAT_CONTROL, CNVC_CFG, id), \ + SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ + SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ + SRI(CURSOR0_COLOR0, CNVC_CUR, id), \ + SRI(CURSOR0_COLOR1, CNVC_CUR, id), \ + SRI(CURSOR0_FP_SCALE_BIAS, CNVC_CUR, id), \ + SRI(DPP_CONTROL, DPP_TOP, id), \ + SRI(CM_HDR_MULT_COEF, CM, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id), \ + SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_G, CNVC_CFG, id), \ + SRI(FCNV_FP_BIAS_B, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_R, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_G, CNVC_CFG, id), \ + SRI(FCNV_FP_SCALE_B, CNVC_CFG, id), \ + SRI(COLOR_KEYER_CONTROL, CNVC_CFG, id), \ + SRI(COLOR_KEYER_ALPHA, CNVC_CFG, id), \ + SRI(COLOR_KEYER_RED, CNVC_CFG, id), \ + SRI(COLOR_KEYER_GREEN, CNVC_CFG, id), \ + SRI(COLOR_KEYER_BLUE, CNVC_CFG, id), \ + SRI(CURSOR_CONTROL, CURSOR0_, id),\ + SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ + SRI(DSCL_MEM_PWR_STATUS, DSCL, id), \ + SRI(DSCL_MEM_PWR_CTRL, DSCL, id) + +#define DPP_REG_LIST_DCN30(id)\ + DPP_REG_LIST_DCN30_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + SRI(CM_BLNDGAM_CONTROL, CM, id), \ + SRI(CM_SHAPER_LUT_DATA, CM, id),\ + SRI(CM_MEM_PWR_CTRL2, CM, id), \ + SRI(CM_MEM_PWR_STATUS2, CM, id), \ + SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM, id),\ + SRI(CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM, id),\ + SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM, id),\ + SRI(CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM, id),\ + SRI(CM_BLNDGAM_LUT_CONTROL, 
CM, id) + + + +#define DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh)\ + TF_SF(CM0_CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, mask_sh),\ + TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_EN, mask_sh),\ + TF_SF(CM0_CM_DEALPHA, CM_DEALPHA_ABLND, mask_sh),\ + TF_SF(CM0_CM_BIAS_CR_R, CM_BIAS_CR_R, mask_sh),\ + TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_Y_G, mask_sh),\ + TF_SF(CM0_CM_BIAS_Y_G_CB_B, CM_BIAS_CB_B, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, mask_sh),\ + TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_MODE, mask_sh),\ + TF_SF(CNVC_CFG0_PRE_DEGAM, PRE_DEGAM_SELECT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_PWL_DISABLE, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_INDEX, CM_GAMCOR_LUT_INDEX, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_DATA, CM_GAMCOR_LUT_DATA, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_WRITE_COLOR_MASK, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_READ_COLOR_SEL, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_HOST_SEL, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_LUT_CONTROL, CM_GAMCOR_LUT_CONFIG_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_SLOPE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_START_BASE_CNTL_B, CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL1_B, CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_END_CNTL2_B, CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_OFFSET_B, CM_GAMCOR_RAMA_OFFSET_B, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh),\ + TF_SF(CM0_CM_GAMCOR_RAMA_REGION_0_1, CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_RIGHT, mask_sh),\ + 
TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_BOTTOM, mask_sh),\ + TF_SF(DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM, EXT_OVERSCAN_TOP, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_H_BLANK, OTG_H_BLANK_END, mask_sh),\ + TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_START, mask_sh),\ + TF_SF(DSCL0_OTG_V_BLANK, OTG_V_BLANK_END, mask_sh),\ + TF_SF(DSCL0_LB_DATA_FORMAT, INTERLEAVE_EN, mask_sh),\ + TF2_SF(DSCL0, LB_DATA_FORMAT__ALPHA_EN, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, MEMORY_CONFIG, mask_sh),\ + TF_SF(DSCL0_LB_MEMORY_CTRL, LB_MAX_PARTITIONS, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_MODE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_NUM_PIPE, mask_sh),\ + TF_SF(DSCL0_DSCL_CONTROL, SCL_BOUNDARY_MODE, mask_sh),\ + TF_SF(DSCL0_DSCL_AUTOCAL, AUTOCAL_PIPE_ID, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_V_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_TAP_CONTROL, SCL_H_NUM_TAPS_C, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_TAP_PAIR_IDX, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_PHASE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_SELECT, SCL_COEF_RAM_FILTER_TYPE, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_EVEN_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF, mask_sh),\ + TF_SF(DSCL0_SCL_COEF_RAM_TAP_DATA, SCL_COEF_RAM_ODD_TAP_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_H_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_HARDCODE_COEF_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_EN, mask_sh),\ + TF_SF(DSCL0_DSCL_2TAP_CONTROL, SCL_V_2TAP_SHARP_FACTOR, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, DSCL_MODE, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_X, mask_sh),\ + TF_SF(DSCL0_RECOUT_START, RECOUT_START_Y, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_WIDTH, mask_sh),\ + TF_SF(DSCL0_RECOUT_SIZE, RECOUT_HEIGHT, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_WIDTH, mask_sh),\ + TF_SF(DSCL0_MPC_SIZE, MPC_HEIGHT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO, SCL_H_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO, SCL_V_SCALE_RATIO, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C, SCL_H_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C, SCL_V_SCALE_RATIO_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT, SCL_H_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_HORZ_FILTER_INIT_C, SCL_H_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_FRAC, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT, SCL_V_INIT_INT, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_FRAC_C, mask_sh),\ + TF_SF(DSCL0_SCL_VERT_FILTER_INIT_C, SCL_V_INIT_INT_C, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_CHROMA_COEF_MODE, mask_sh),\ + TF_SF(DSCL0_SCL_MODE, SCL_COEF_RAM_SELECT_CURRENT, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_EN, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_DEALPHA, PRE_DEALPHA_ABLND_EN, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_EN, mask_sh), \ + 
TF_SF(CNVC_CFG0_PRE_REALPHA, PRE_REALPHA_ABLND_EN, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_MODE, PRE_CSC_MODE_CURRENT, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C11, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C11_C12, PRE_CSC_C12, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C33, mask_sh), \ + TF_SF(CNVC_CFG0_PRE_CSC_C33_C34, PRE_CSC_C34, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_CONTROL, CM_POST_CSC_MODE_CURRENT, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C11, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \ + TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ + TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_SURFACE_PIXEL_FORMAT, mask_sh), \ + TF_SF(CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT, CNVC_ALPHA_PLANE_ENABLE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_EXPANSION_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ENABLE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR0, CUR0_COLOR0, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_COLOR1, CUR0_COLOR1, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_BIAS, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_FP_SCALE_BIAS, CUR0_FP_SCALE, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_CLOCK_ENABLE, mask_sh), \ + TF_SF(CM0_CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, mask_sh), \ + TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \ + TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CNV16, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, CLAMP_POSITIVE_C, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_R, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_G, mask_sh), \ + TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_CROSSBAR_B, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, mask_sh), \ + TF_SF(CNVC_CFG0_ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, mask_sh), \ + TF_SF(CNVC_CFG0_FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_EN, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_CONTROL, COLOR_KEYER_MODE, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_ALPHA, COLOR_KEYER_ALPHA_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_RED, COLOR_KEYER_RED_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, 
COLOR_KEYER_GREEN_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_GREEN, COLOR_KEYER_GREEN_HIGH, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_LOW, mask_sh), \ + TF_SF(CNVC_CFG0_COLOR_KEYER_BLUE, COLOR_KEYER_BLUE_HIGH, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIX_INV_MODE, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_PIXEL_ALPHA_MOD_EN, mask_sh), \ + TF_SF(CNVC_CUR0_CURSOR0_CONTROL, CUR0_ROM_EN, mask_sh),\ + TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ + TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh),\ + TF_SF(DSCL0_DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, mask_sh) + +#define DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh)\ + TF_SF(CM0_CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, mask_sh), \ + TF_SF(CM0_CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, mask_sh),\ + TF_SF(CM0_CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, mask_sh),\ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT_CURRENT, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_SELECT, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_HOST_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_CONFIG_MODE, mask_sh), \ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, mask_sh) + + +#define DPP_REG_LIST_SH_MASK_DCN30(mask_sh)\ + DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + DPP_REG_LIST_SH_MASK_DCN30_UPDATED(mask_sh) + +#define DPP_REG_FIELD_LIST_DCN3(type) \ + TF_REG_FIELD_LIST_DCN2_0(type); \ + type FORMAT_CROSSBAR_R; \ + type FORMAT_CROSSBAR_G; \ + type FORMAT_CROSSBAR_B; \ + type CM_DEALPHA_EN;\ + type CM_DEALPHA_ABLND;\ + type CM_BIAS_Y_G;\ + type CM_BIAS_CB_B;\ + type CM_BIAS_CR_R;\ + type 
GAMCOR_MEM_PWR_DIS; \ + type GAMCOR_MEM_PWR_FORCE; \ + type HDR3DLUT_MEM_PWR_FORCE; \ + type SHAPER_MEM_PWR_FORCE; \ + type PRE_DEGAM_MODE;\ + type PRE_DEGAM_SELECT;\ + type CNVC_ALPHA_PLANE_ENABLE; \ + type PRE_DEALPHA_EN; \ + type PRE_DEALPHA_ABLND_EN; \ + type PRE_REALPHA_EN; \ + type PRE_REALPHA_ABLND_EN; \ + type PRE_CSC_MODE; \ + type PRE_CSC_MODE_CURRENT; \ + type PRE_CSC_C11; \ + type PRE_CSC_C12; \ + type PRE_CSC_C33; \ + type PRE_CSC_C34; \ + type CM_POST_CSC_MODE; \ + type CM_POST_CSC_MODE_CURRENT; \ + type CM_POST_CSC_C11; \ + type CM_POST_CSC_C12; \ + type CM_POST_CSC_C33; \ + type CM_POST_CSC_C34; \ + type CM_GAMCOR_MODE; \ + type CM_GAMCOR_SELECT; \ + type CM_GAMCOR_PWL_DISABLE; \ + type CM_GAMCOR_MODE_CURRENT; \ + type CM_GAMCOR_SELECT_CURRENT; \ + type CM_GAMCOR_LUT_INDEX; \ + type CM_GAMCOR_LUT_DATA; \ + type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \ + type CM_GAMCOR_LUT_READ_COLOR_SEL; \ + type CM_GAMCOR_LUT_HOST_SEL; \ + type CM_GAMCOR_LUT_CONFIG_MODE; \ + type CM_GAMCOR_LUT_STATUS; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_END_B; \ + type CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; \ + type CM_GAMCOR_RAMA_OFFSET_B; \ + type CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; \ + type CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; \ + type CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; \ + type CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS;\ + type CM_GAMUT_REMAP_MODE_CURRENT;\ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_B; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_G; \ + type CM_BLNDGAM_RAMB_EXP_REGION_START_SLOPE_R; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_B; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_G; \ + type CM_BLNDGAM_RAMA_EXP_REGION_START_SLOPE_R; \ + type CM_BLNDGAM_LUT_WRITE_COLOR_MASK; \ + type CM_BLNDGAM_LUT_HOST_SEL; \ + type CM_BLNDGAM_LUT_CONFIG_MODE; \ + type CM_3DLUT_MODE_CURRENT; \ + type CM_SHAPER_MODE_CURRENT; \ + type CM_BLNDGAM_MODE; \ + type CM_BLNDGAM_MODE_CURRENT; \ + type CM_BLNDGAM_SELECT_CURRENT; \ + type CM_BLNDGAM_SELECT; \ + type GAMCOR_MEM_PWR_STATE; \ + type BLNDGAM_MEM_PWR_STATE; \ + type HDR3DLUT_MEM_PWR_STATE; \ + type SHAPER_MEM_PWR_STATE + +struct dcn3_dpp_shift { + DPP_REG_FIELD_LIST_DCN3(uint8_t); +}; + +struct dcn3_dpp_mask { + DPP_REG_FIELD_LIST_DCN3(uint32_t); +}; + +#define DPP_DCN3_REG_VARIABLE_LIST_COMMON \ + DPP_DCN2_REG_VARIABLE_LIST; \ + uint32_t CM_MEM_PWR_STATUS;\ + uint32_t CM_MEM_PWR_STATUS2;\ + uint32_t CM_MEM_PWR_CTRL2;\ + uint32_t CM_DEALPHA;\ + uint32_t CM_BIAS_CR_R;\ + uint32_t CM_BIAS_Y_G_CB_B;\ + uint32_t PRE_DEGAM;\ + uint32_t PRE_DEALPHA; \ + uint32_t PRE_REALPHA; \ + uint32_t PRE_CSC_MODE; \ + uint32_t PRE_CSC_C11_C12; \ + uint32_t PRE_CSC_C33_C34; \ + uint32_t PRE_CSC_B_C11_C12; \ + uint32_t PRE_CSC_B_C33_C34; \ + uint32_t CM_POST_CSC_CONTROL; \ + uint32_t CM_POST_CSC_C11_C12; \ + uint32_t CM_POST_CSC_C33_C34; \ + uint32_t CM_POST_CSC_B_C11_C12; \ + uint32_t CM_POST_CSC_B_C33_C34; \ + uint32_t CM_GAMUT_REMAP_B_C11_C12; \ + uint32_t CM_GAMUT_REMAP_B_C13_C14; \ + uint32_t CM_GAMUT_REMAP_B_C21_C22; \ + uint32_t CM_GAMUT_REMAP_B_C23_C24; \ + uint32_t CM_GAMUT_REMAP_B_C31_C32; \ + uint32_t CM_GAMUT_REMAP_B_C33_C34; \ + uint32_t CM_GAMCOR_CONTROL; \ + uint32_t CM_GAMCOR_LUT_CONTROL; \ + uint32_t CM_GAMCOR_LUT_INDEX; \ + uint32_t CM_GAMCOR_LUT_DATA; \ + uint32_t CM_GAMCOR_RAMB_START_CNTL_B; \ + uint32_t 
CM_GAMCOR_RAMB_START_CNTL_G; \ + uint32_t CM_GAMCOR_RAMB_START_CNTL_R; \ + uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMB_START_SLOPE_CNTL_R; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL1_B; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL2_B; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL1_G; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL2_G; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL1_R; \ + uint32_t CM_GAMCOR_RAMB_END_CNTL2_R; \ + uint32_t CM_GAMCOR_RAMB_REGION_0_1; \ + uint32_t CM_GAMCOR_RAMB_REGION_32_33; \ + uint32_t CM_GAMCOR_RAMB_OFFSET_B; \ + uint32_t CM_GAMCOR_RAMB_OFFSET_G; \ + uint32_t CM_GAMCOR_RAMB_OFFSET_R; \ + uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMB_START_BASE_CNTL_R; \ + uint32_t CM_GAMCOR_RAMA_START_CNTL_B; \ + uint32_t CM_GAMCOR_RAMA_START_CNTL_G; \ + uint32_t CM_GAMCOR_RAMA_START_CNTL_R; \ + uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMA_START_SLOPE_CNTL_R; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL1_B; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL2_B; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL1_G; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL2_G; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL1_R; \ + uint32_t CM_GAMCOR_RAMA_END_CNTL2_R; \ + uint32_t CM_GAMCOR_RAMA_REGION_0_1; \ + uint32_t CM_GAMCOR_RAMA_REGION_32_33; \ + uint32_t CM_GAMCOR_RAMA_OFFSET_B; \ + uint32_t CM_GAMCOR_RAMA_OFFSET_G; \ + uint32_t CM_GAMCOR_RAMA_OFFSET_R; \ + uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_B; \ + uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_G; \ + uint32_t CM_GAMCOR_RAMA_START_BASE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMA_START_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_B; \ + uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_G; \ + uint32_t CM_BLNDGAM_RAMB_START_SLOPE_CNTL_R; \ + uint32_t CM_BLNDGAM_LUT_CONTROL + + +struct dcn3_dpp_registers { + DPP_DCN3_REG_VARIABLE_LIST_COMMON; +}; + + +struct dcn3_dpp { + struct dpp base; + + const struct dcn3_dpp_registers *tf_regs; + const struct dcn3_dpp_shift *tf_shift; + const struct dcn3_dpp_mask *tf_mask; + + const uint16_t *filter_v; + const uint16_t *filter_h; + const uint16_t *filter_v_c; + const uint16_t *filter_h_c; + int lb_pixel_depth_supported; + int lb_memory_size; + int lb_bits_per_entry; + bool is_write_to_ram_a_safe; + struct scaler_data scl_data; + struct pwl_params pwl_data; +}; + +bool dpp3_construct(struct dcn3_dpp *dpp3, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask); + +bool dpp3_program_gamcor_lut( + struct dpp *dpp_base, const struct pwl_params *params); + +void dpp3_program_CM_dealpha( + struct dpp *dpp_base, + uint32_t enable, uint32_t additive_blending); + +void dpp30_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s); + +bool dpp3_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + +void dpp3_cnv_setup ( + struct dpp *dpp_base, + enum surface_pixel_format format, + enum expansion_mode mode, + struct dc_csc_transform input_csc_color_matrix, + enum dc_color_space input_color_space, + struct cnv_alpha_2bit_lut *alpha_2bit_lut); + +void dpp3_program_CM_bias( + struct dpp *dpp_base, + struct CM_bias_params *bias_params); + +void dpp3_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t 
multiplier); + +void dpp3_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust); + +void dpp3_set_pre_degam(struct dpp *dpp_base, + enum dc_transfer_func_predefined tr); + +void dpp3_set_cursor_attributes( + struct dpp *dpp_base, + struct dc_cursor_attributes *cursor_attributes); + +void dpp3_program_post_csc( + struct dpp *dpp_base, + enum dc_color_space color_space, + enum dcn10_input_csc_select input_select, + const struct out_csc_color_matrix *tbl_entry); + +void dpp3_program_cm_bias( + struct dpp *dpp_base, + struct CM_bias_params *bias_params); + +void dpp3_program_cm_dealpha( + struct dpp *dpp_base, + uint32_t enable, uint32_t additive_blending); + +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust); +#endif /* __DC_HWSS_DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c new file mode 100644 index 000000000000..ce1b3cf7e1bb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c @@ -0,0 +1,461 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "core_types.h" +#include "reg_helper.h" +#include "dcn30/dcn30_dpp.h" +#include "basics/conversion.h" +#include "dcn30/dcn30_cm_common.h" + +#define REG(reg)\ + dpp->tf_regs->reg + +#define CTX \ + dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + dpp->tf_shift->field_name, dpp->tf_mask->field_name + +static void dpp3_enable_cm_block( + struct dpp *dpp_base) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + unsigned int cm_bypass_mode = 0; + + // debug option: put CM in bypass mode + if (dpp_base->ctx->dc->debug.cm_in_bypass) + cm_bypass_mode = 1; + + REG_UPDATE(CM_CONTROL, CM_BYPASS, cm_bypass_mode); +} + +static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base) +{ + enum dc_lut_mode mode = LUT_BYPASS; + uint32_t state_mode; + uint32_t lut_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode); + + if (state_mode == 2) {//Programmable RAM LUT + REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode); + if (lut_mode == 0) + mode = LUT_RAM_A; + else + mode = LUT_RAM_B; + } + + return mode; +} + +static void dpp3_program_gammcor_lut( + struct dpp *dpp_base, + const struct pwl_result_data *rgb, + uint32_t num, + bool is_ram_a) +{ + uint32_t i; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint32_t last_base_value_red = rgb[num-1].red_reg + rgb[num-1].delta_red_reg; + uint32_t last_base_value_green = rgb[num-1].green_reg + rgb[num-1].delta_green_reg; + uint32_t last_base_value_blue = rgb[num-1].blue_reg + rgb[num-1].delta_blue_reg; + + /*fill in the LUT with all base values to be used by pwl module + * HW auto increments the LUT index: back-to-back write + */ + if (is_rgb_equal(rgb, num)) { + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); + + } else { + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 4); + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].red_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_red); + + REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); + + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 2); + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].green_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_green); + + REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); + + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 1); + for (i = 0 ; i < num; i++) + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, rgb[i].blue_reg); + + REG_SET(CM_GAMCOR_LUT_DATA, 0, CM_GAMCOR_LUT_DATA, last_base_value_blue); + } +} + +static void dpp3_power_on_gamcor_lut( + struct dpp *dpp_base, + bool power_on) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) { + if (power_on) { + REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0); + REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5); + } else { + dpp_base->ctx->dc->optimized_required = true; + dpp_base->deferred_reg_writes.bits.disable_gamcor = true; + } + } else + REG_SET(CM_MEM_PWR_CTRL, 0, + GAMCOR_MEM_PWR_DIS, power_on == true ? 
0:1); +} + +void dpp3_program_cm_dealpha( + struct dpp *dpp_base, + uint32_t enable, uint32_t additive_blending) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET_2(CM_DEALPHA, 0, + CM_DEALPHA_EN, enable, + CM_DEALPHA_ABLND, additive_blending); +} + +void dpp3_program_cm_bias( + struct dpp *dpp_base, + struct CM_bias_params *bias_params) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_SET(CM_BIAS_CR_R, 0, CM_BIAS_CR_R, bias_params->cm_bias_cr_r); + REG_SET_2(CM_BIAS_Y_G_CB_B, 0, + CM_BIAS_Y_G, bias_params->cm_bias_y_g, + CM_BIAS_CB_B, bias_params->cm_bias_cb_b); +} + +static void dpp3_gamcor_reg_field( + struct dcn3_dpp *dpp, + struct dcn3_xfer_func_reg *reg) +{ + + reg->shifts.field_region_start_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; + reg->masks.field_region_start_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_BASE_B; + reg->shifts.field_offset = dpp->tf_shift->CM_GAMCOR_RAMA_OFFSET_B; + reg->masks.field_offset = dpp->tf_mask->CM_GAMCOR_RAMA_OFFSET_B; + + reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; + reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_LUT_OFFSET; + reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION0_NUM_SEGMENTS; + reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; + reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_LUT_OFFSET; + reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; + reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION1_NUM_SEGMENTS; + + reg->shifts.field_region_end = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_B; + reg->masks.field_region_end = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_B; + reg->shifts.field_region_end_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; + reg->masks.field_region_end_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_SLOPE_B; + reg->shifts.field_region_end_base = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; + reg->masks.field_region_end_base = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_END_BASE_B; + reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; + reg->masks.field_region_linear_slope = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SLOPE_B; + reg->shifts.exp_region_start = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_B; + reg->masks.exp_region_start = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_B; + reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; + reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_GAMCOR_RAMA_EXP_REGION_START_SEGMENT_B; +} + +static void dpp3_configure_gamcor_lut( + struct dpp *dpp_base, + bool is_ram_a) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_WRITE_COLOR_MASK, 7); + REG_UPDATE(CM_GAMCOR_LUT_CONTROL, + CM_GAMCOR_LUT_HOST_SEL, is_ram_a == true ? 
0:1); + REG_SET(CM_GAMCOR_LUT_INDEX, 0, CM_GAMCOR_LUT_INDEX, 0); +} + + +bool dpp3_program_gamcor_lut( + struct dpp *dpp_base, const struct pwl_params *params) +{ + enum dc_lut_mode current_mode; + enum dc_lut_mode next_mode; + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + struct dcn3_xfer_func_reg gam_regs; + + dpp3_enable_cm_block(dpp_base); + + if (params == NULL) { //bypass if we have no pwl data + REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 0); + if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) + dpp3_power_on_gamcor_lut(dpp_base, false); + return false; + } + dpp3_power_on_gamcor_lut(dpp_base, true); + REG_SET(CM_GAMCOR_CONTROL, 0, CM_GAMCOR_MODE, 2); + + current_mode = dpp30_get_gamcor_current(dpp_base); + if (current_mode == LUT_BYPASS || current_mode == LUT_RAM_A) + next_mode = LUT_RAM_B; + else + next_mode = LUT_RAM_A; + + dpp3_power_on_gamcor_lut(dpp_base, true); + dpp3_configure_gamcor_lut(dpp_base, next_mode == LUT_RAM_A); + + if (next_mode == LUT_RAM_B) { + gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMB_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMB_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMB_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMB_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMB_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMB_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMB_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMB_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMB_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMB_END_CNTL2_R); + gam_regs.region_start = REG(CM_GAMCOR_RAMB_REGION_0_1); + gam_regs.region_end = REG(CM_GAMCOR_RAMB_REGION_32_33); + //New registers in DCN3AG/DCN GAMCOR block + gam_regs.offset_b = REG(CM_GAMCOR_RAMB_OFFSET_B); + gam_regs.offset_g = REG(CM_GAMCOR_RAMB_OFFSET_G); + gam_regs.offset_r = REG(CM_GAMCOR_RAMB_OFFSET_R); + gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_B); + gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_G); + gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMB_START_BASE_CNTL_R); + } else { + gam_regs.start_cntl_b = REG(CM_GAMCOR_RAMA_START_CNTL_B); + gam_regs.start_cntl_g = REG(CM_GAMCOR_RAMA_START_CNTL_G); + gam_regs.start_cntl_r = REG(CM_GAMCOR_RAMA_START_CNTL_R); + gam_regs.start_slope_cntl_b = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_B); + gam_regs.start_slope_cntl_g = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_G); + gam_regs.start_slope_cntl_r = REG(CM_GAMCOR_RAMA_START_SLOPE_CNTL_R); + gam_regs.start_end_cntl1_b = REG(CM_GAMCOR_RAMA_END_CNTL1_B); + gam_regs.start_end_cntl2_b = REG(CM_GAMCOR_RAMA_END_CNTL2_B); + gam_regs.start_end_cntl1_g = REG(CM_GAMCOR_RAMA_END_CNTL1_G); + gam_regs.start_end_cntl2_g = REG(CM_GAMCOR_RAMA_END_CNTL2_G); + gam_regs.start_end_cntl1_r = REG(CM_GAMCOR_RAMA_END_CNTL1_R); + gam_regs.start_end_cntl2_r = REG(CM_GAMCOR_RAMA_END_CNTL2_R); + gam_regs.region_start = REG(CM_GAMCOR_RAMA_REGION_0_1); + gam_regs.region_end = REG(CM_GAMCOR_RAMA_REGION_32_33); + //New registers in DCN3AG/DCN GAMCOR block + gam_regs.offset_b = REG(CM_GAMCOR_RAMA_OFFSET_B); + gam_regs.offset_g = REG(CM_GAMCOR_RAMA_OFFSET_G); + gam_regs.offset_r = REG(CM_GAMCOR_RAMA_OFFSET_R); + gam_regs.start_base_cntl_b = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_B); + gam_regs.start_base_cntl_g = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_G); 
+ gam_regs.start_base_cntl_r = REG(CM_GAMCOR_RAMA_START_BASE_CNTL_R); + } + + //get register fields + dpp3_gamcor_reg_field(dpp, &gam_regs); + + //program register set for LUTA/LUTB + cm_helper_program_gamcor_xfer_func(dpp_base->ctx, params, &gam_regs); + + dpp3_program_gammcor_lut(dpp_base, params->rgb_resulted, params->hw_points_num, + next_mode == LUT_RAM_A); + + //select Gamma LUT to use for next frame + REG_UPDATE(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT, next_mode == LUT_RAM_A ? 0:1); + + return true; +} + +void dpp3_set_hdr_multiplier( + struct dpp *dpp_base, + uint32_t multiplier) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + + REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier); +} + + +static void program_gamut_remap( + struct dcn3_dpp *dpp, + const uint16_t *regval, + int select) +{ + uint16_t selection = 0; + struct color_matrices_reg gam_regs; + + if (regval == NULL || select == GAMUT_REMAP_BYPASS) { + REG_SET(CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, 0); + return; + } + switch (select) { + case GAMUT_REMAP_COEFF: + selection = 1; + break; + /*this corresponds to GAMUT_REMAP coefficients set B + *we don't have common coefficient sets in dcn3ag/dcn3 + */ + case GAMUT_REMAP_COMA_COEFF: + selection = 2; + break; + default: + break; + } + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + + if (select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } else if (select == GAMUT_REMAP_COMA_COEFF) { + + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + } + //select coefficient set to use + REG_SET( + CM_GAMUT_REMAP_CONTROL, 0, + CM_GAMUT_REMAP_MODE, selection); +} + +void dpp3_cm_set_gamut_remap( + struct dpp *dpp_base, + const struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + int i = 0; + int gamut_mode; + + if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) + /* Bypass if type is bypass or hw */ + program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS); + else { + struct fixed31_32 arr_matrix[12]; + uint16_t arr_reg_val[12]; + + for (i = 0; i < 12; i++) + arr_matrix[i] = adjust->temperature_matrix[i]; + + convert_float_matrix( + arr_reg_val, arr_matrix, 12); + + //current coefficient set in use + REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &gamut_mode); + + if (gamut_mode == 0) + gamut_mode = 1; //use coefficient set A + else if (gamut_mode == 1) + gamut_mode = 2; + else + gamut_mode = 1; + + //follow dcn2 approach for now - using only coefficient set A + program_gamut_remap(dpp, arr_reg_val, gamut_mode); + } +} + +static void read_gamut_remap(struct dcn3_dpp *dpp, + uint16_t *regval, + int *select) +{ + struct color_matrices_reg gam_regs; + uint32_t selection; + + //current coefficient set in use + REG_GET(CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE_CURRENT, &selection); + + *select = selection; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11; + gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11; + gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12; + 
gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12; + + if (*select == GAMUT_REMAP_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + + } else if (*select == GAMUT_REMAP_COMA_COEFF) { + gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_B_C11_C12); + gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_B_C33_C34); + + cm_helper_read_color_matrices(dpp->base.ctx, + regval, + &gam_regs); + } +} + +void dpp3_cm_get_gamut_remap(struct dpp *dpp_base, + struct dpp_grph_csc_adjustment *adjust) +{ + struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); + uint16_t arr_reg_val[12]; + int select; + + read_gamut_remap(dpp, arr_reg_val, &select); + + if (select == GAMUT_REMAP_BYPASS) { + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; + return; + } + + adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + convert_hw_matrix(adjust->temperature_matrix, + arr_reg_val, ARRAY_SIZE(arr_reg_val)); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt new file mode 100644 index 000000000000..7743edc4599f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn32_dpp.c + dcn32_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c new file mode 100644 index 000000000000..41679997b44d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c @@ -0,0 +1,165 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "core_types.h" +#include "reg_helper.h" +#include "dcn32/dcn32_dpp.h" +#include "basics/conversion.h" +#include "dcn30/dcn30_cm_common.h" + +/* Compute the maximum number of lines that we can fit in the line buffer */ +static void dscl32_calc_lb_num_partitions( + const struct scaler_data *scl_data, + enum lb_memory_config lb_config, + int *num_part_y, + int *num_part_c) +{ + int memory_line_size_y, memory_line_size_c, memory_line_size_a, + lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a; + + int line_size = scl_data->viewport.width < scl_data->recout.width ? 
+ scl_data->viewport.width : scl_data->recout.width; + int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ? + scl_data->viewport_c.width : scl_data->recout.width; + + if (line_size == 0) + line_size = 1; + + if (line_size_c == 0) + line_size_c = 1; + + memory_line_size_y = (line_size + 5) / 6; /* +5 to ceil */ + memory_line_size_c = (line_size_c + 5) / 6; /* +5 to ceil */ + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */ + + if (lb_config == LB_MEMORY_CONFIG_1) { + lb_memory_size = 970; + lb_memory_size_c = 970; + lb_memory_size_a = 970; + } else if (lb_config == LB_MEMORY_CONFIG_2) { + lb_memory_size = 1290; + lb_memory_size_c = 1290; + lb_memory_size_a = 1290; + } else if (lb_config == LB_MEMORY_CONFIG_3) { + if (scl_data->viewport.width == scl_data->h_active && + scl_data->viewport.height == scl_data->v_active) { + /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ + /* use increased LB size for calculation only if Scaler not enabled */ + lb_memory_size = 970 + 1290 + 1170 + 1170 + 1170; + lb_memory_size_c = 970 + 1290; + lb_memory_size_a = 970 + 1290 + 1170; + } else { + /* 420 mode: luma using all 3 mem from Y, plus 3rd mem from Cr and Cb */ + lb_memory_size = 970 + 1290 + 484 + 484 + 484; + lb_memory_size_c = 970 + 1290; + lb_memory_size_a = 970 + 1290 + 484; + } + } else { + if (scl_data->viewport.width == scl_data->h_active && + scl_data->viewport.height == scl_data->v_active) { + /* use increased LB size for calculation only if Scaler not enabled */ + lb_memory_size = 970 + 1290 + 1170; + lb_memory_size_c = 970 + 1290 + 1170; + lb_memory_size_a = 970 + 1290 + 1170; + } else { + lb_memory_size = 970 + 1290 + 484; + lb_memory_size_c = 970 + 1290 + 484; + lb_memory_size_a = 970 + 1290 + 484; + } + } + *num_part_y = lb_memory_size / memory_line_size_y; + *num_part_c = lb_memory_size_c / memory_line_size_c; + num_partitions_a = lb_memory_size_a / memory_line_size_a; + + if (scl_data->lb_params.alpha_en + && (num_partitions_a < *num_part_y)) + *num_part_y = num_partitions_a; + + if (*num_part_y > 32) + *num_part_y = 32; + if (*num_part_c > 32) + *num_part_c = 32; +} + +static struct dpp_funcs dcn32_dpp_funcs = { + .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, + .dpp_read_state = dpp30_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = NULL, + .dpp_set_pre_degam = dpp3_set_pre_degam, + .dpp_program_input_lut = NULL, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp3_cnv_setup, + .dpp_program_degamma_pwl = NULL, + .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, + .dpp_program_cm_bias = dpp3_program_cm_bias, + + .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP + .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp3_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp1_dppclk_control, + 
.dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, +}; + + +static struct dpp_caps dcn32_dpp_cap = { + .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT, + .max_lb_partitions = 31, + .dscl_calc_lb_num_partitions = dscl32_calc_lb_num_partitions, +}; + +bool dpp32_construct( + struct dcn3_dpp *dpp, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask) +{ + dpp->base.ctx = ctx; + + dpp->base.inst = inst; + dpp->base.funcs = &dcn32_dpp_funcs; + dpp->base.caps = &dcn32_dpp_cap; + + dpp->tf_regs = tf_regs; + dpp->tf_shift = tf_shift; + dpp->tf_mask = tf_mask; + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h new file mode 100644 index 000000000000..572958d287eb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h @@ -0,0 +1,38 @@ +/* Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN32_DPP_H__ +#define __DCN32_DPP_H__ + +#include "dcn20/dcn20_dpp.h" +#include "dcn30/dcn30_dpp.h" + +bool dpp32_construct(struct dcn3_dpp *dpp3, + struct dc_context *ctx, + uint32_t inst, + const struct dcn3_dpp_registers *tf_regs, + const struct dcn3_dpp_shift *tf_shift, + const struct dcn3_dpp_mask *tf_mask); + +#endif /* __DCN32_DPP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt new file mode 100644 index 000000000000..91df5db26435 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/CMakeLists.txt @@ -0,0 +1,4 @@ +dal3_subdirectory_sources( + dcn35_dpp.c + dcn35_dpp.h +) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c new file mode 100644 index 000000000000..0146b36b93d7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "dcn35/dcn35_dpp.h" +#include "reg_helper.h" + +#define REG(reg) dpp->tf_regs->reg + +#define CTX dpp->base.ctx + +#undef FN +#define FN(reg_name, field_name) \ + ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \ + ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name + +bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx, + uint32_t inst, const struct dcn3_dpp_registers *tf_regs, + const struct dcn35_dpp_shift *tf_shift, + const struct dcn35_dpp_mask *tf_mask) +{ + return dpp32_construct(dpp, ctx, inst, tf_regs, + (const struct dcn3_dpp_shift *)(tf_shift), + (const struct dcn3_dpp_mask *)(tf_mask)); +} + +void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable) +{ + REG_UPDATE(DPP_CONTROL, DPP_FGCG_REP_DIS, !enable); +} diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h new file mode 100644 index 000000000000..09b84307cd9e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN35_DPP_H__ +#define __DCN35_DPP_H__ + +#include "dcn32/dcn32_dpp.h" + +#define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \ + DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh) + +#define DPP_REG_FIELD_LIST_DCN35(type) \ + struct { \ + DPP_REG_FIELD_LIST_DCN3(type); \ + type DPP_FGCG_REP_DIS; \ + } + +struct dcn35_dpp_shift { + DPP_REG_FIELD_LIST_DCN35(uint8_t); +}; + +struct dcn35_dpp_mask { + DPP_REG_FIELD_LIST_DCN35(uint32_t); +}; + +bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx, + uint32_t inst, const struct dcn3_dpp_registers *tf_regs, + const struct dcn35_dpp_shift *tf_shift, + const struct dcn35_dpp_mask *tf_mask); + +void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable); + +#endif // __DCN35_DPP_H -- cgit v1.2.3-70-g09d2 From d1b2703cc28769d4bb61d7750a9a74acc916cebe Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Fri, 8 Mar 2024 15:48:48 +0800 Subject: drm/amdgpu: add the sensor value of VCN activity This will add the sensor value of VCN activity for some ASICs. Signed-off-by: Xiaojian Du Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 + drivers/gpu/drm/amd/pm/amdgpu_pm.c | 37 ++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 32054ecf0b87..afb930b70615 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -150,6 +150,7 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_VCN_POWER_STATE, AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, + AMDGPU_PP_SENSOR_VCN_LOAD, }; enum amd_pp_task { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index f09b9d49297e..efc631bddf4a 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1581,6 +1581,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev, return sysfs_emit(buf, "%d\n", value); } +/** + * DOC: vcn_busy_percent + * + * The amdgpu driver provides a sysfs API for reading how busy the VCN + * is as a percentage. The file vcn_busy_percent is used for this. + * The SMU firmware computes a percentage of load based on the + * aggregate activity level in the IP cores. 
+ */ +static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(ddev); + unsigned int value; + int r; + + r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); + if (r) + return r; + + return sysfs_emit(buf, "%d\n", value); +} + /** * DOC: pcie_bw * @@ -2180,6 +2204,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { .attr_update = pp_od_clk_voltage_attr_update), AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), @@ -2223,6 +2248,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ gc_ver != IP_VERSION(9, 4, 3)) || gc_ver == IP_VERSION(9, 0, 1)) *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(vcn_busy_percent)) { + if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0))) + *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pcie_bw)) { /* PCIe Perf counters won't work on APU nodes */ if (adev->flags & AMD_IS_APU || @@ -4429,6 +4463,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a /* MEM Load */ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) seq_printf(m, "MEM Load: %u %%\n", value); + /* VCN Load */ + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size)) + seq_printf(m, "VCN Load: %u %%\n", value); seq_printf(m, "\n"); -- cgit v1.2.3-70-g09d2 From 2c8c7a2cd8b7c1a2373391fc0220c6c9859fa43c Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 24 Feb 2024 22:38:17 -0500 Subject: drm/amd/display: Update DMUB flags and definitions [WHAT] - Update replay residency tracing design to support more types including tracking PHY and ALPM residency types - Add commands for Replay frame update count profiling - Enhance HWFQ with additional flags to allow for more optimized IPS low power state residencies - Add new flag to indicate if a new frame update needed for ABM to ramp up into steady state Acked-by: Alex Hung Signed-off-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 2 +- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 40 ++++++++++++++++++++++-- 2 files changed, 38 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c index b010814706fe..4f559a025cf0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c @@ -244,7 +244,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst, uint16_t param = (uint16_t)(panel_inst << 8); if (is_alpm) - param |= REPLAY_RESIDENCY_MODE_ALPM; + param |= REPLAY_RESIDENCY_FIELD_MODE_ALPM; if (is_start) param |= REPLAY_RESIDENCY_ENABLE; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h 
b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 4a650ac571d7..b81cd2649db3 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -194,6 +194,11 @@ union abm_flags { * of user backlight level. */ unsigned int abm_gradual_bl_change : 1; + + /** + * @abm_new_frame: Indicates if a new frame update needed for ABM to ramp up into steady + */ + unsigned int abm_new_frame : 1; } bitfields; unsigned int u32All; @@ -2937,18 +2942,47 @@ struct dmub_rb_cmd_psr_set_power_opt { struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data; }; +/** + * Definition of Replay Residency GPINT command. + * Bit[0] - Residency mode for Revision 0 + * Bit[1] - Enable/Disable state + * Bit[2-3] - Revision number + * Bit[4-7] - Residency mode for Revision 1 + * Bit[8] - Panel instance + * Bit[9-15] - Reserved + */ + +enum pr_residency_mode { + PR_RESIDENCY_MODE_PHY = 0x0, + PR_RESIDENCY_MODE_ALPM, + PR_RESIDENCY_MODE_IPS2, + PR_RESIDENCY_MODE_FRAME_CNT, +}; + #define REPLAY_RESIDENCY_MODE_SHIFT (0) #define REPLAY_RESIDENCY_ENABLE_SHIFT (1) +#define REPLAY_RESIDENCY_REVISION_SHIFT (2) +#define REPLAY_RESIDENCY_MODE2_SHIFT (4) #define REPLAY_RESIDENCY_MODE_MASK (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) -# define REPLAY_RESIDENCY_MODE_IPS 0x10 +# define REPLAY_RESIDENCY_FIELD_MODE_PHY (0x0 << REPLAY_RESIDENCY_MODE_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE_ALPM (0x1 << REPLAY_RESIDENCY_MODE_SHIFT) + +#define REPLAY_RESIDENCY_MODE2_MASK (0xF << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_IPS (0x1 << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT (0x2 << REPLAY_RESIDENCY_MODE2_SHIFT) #define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_ENABLE (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) +#define REPLAY_RESIDENCY_REVISION_MASK (0x3 << REPLAY_RESIDENCY_REVISION_SHIFT) +# define REPLAY_RESIDENCY_REVISION_0 (0x0 << REPLAY_RESIDENCY_REVISION_SHIFT) +# define REPLAY_RESIDENCY_REVISION_1 (0x1 << REPLAY_RESIDENCY_REVISION_SHIFT) + +/** + * Definition of a replay_state. + */ enum replay_state { REPLAY_STATE_0 = 0x0, REPLAY_STATE_1 = 0x10, -- cgit v1.2.3-70-g09d2 From f1b8479dc997a0e625f3d94920cd778f55e299b5 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 14 Feb 2024 13:32:32 -0700 Subject: drm/amd/display: Add comments to v_total calculation and drop legacy TODO [WHY & HOW] This commit just adds some simple comments to help understand the calculation of V total duration for Freesync. Also, remove a legacy TODO comment from link service type. 
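For illustration only (this snippet is not part of the patch): the HDMI branch touched by the diff below computes v_total with a ceiling division. pix_clk_100hz stores the pixel clock in units of 100 Hz and the duration is in microseconds, so the divisor h_total is scaled by 10000 and (divisor - 1) is added before the integer divide. A minimal standalone sketch of the same arithmetic, using hypothetical 1080p-style numbers:

#include <stdint.h>
#include <stdio.h>

/*
 * Ceiling of (duration_us * pix_clk_100hz) / (h_total * 10000).
 * pix_clk_100hz is the pixel clock in units of 100 Hz, duration_us is the
 * requested frame duration in microseconds; the result is lines per frame.
 */
static uint64_t calc_v_total_ceil(uint64_t duration_us, uint64_t pix_clk_100hz,
				  uint64_t h_total)
{
	uint64_t h_total_up_scaled = h_total * 10000;

	/* Adding (divisor - 1) before the integer divide rounds up, so the
	 * frame duration is never shorter than requested.
	 */
	return (duration_us * pix_clk_100hz + (h_total_up_scaled - 1)) /
	       h_total_up_scaled;
}

int main(void)
{
	/* Hypothetical timing: h_total = 2200, 148.5 MHz pixel clock,
	 * 16667 us frame duration (~60 Hz) -> 1126 lines after rounding up.
	 */
	printf("v_total = %llu\n",
	       (unsigned long long)calc_v_total_ceil(16667, 1485000, 2200));
	return 0;
}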
Acked-by: Alex Hung Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/link_service_types.h | 1 - drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 92dbff22a7c6..1867aac57cf2 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -73,7 +73,6 @@ struct link_training_settings { enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; bool should_set_fec_ready; - /* TODO - factor lane_settings out because it changes during LT */ union dc_dp_ffe_preset *ffe_preset; uint16_t cr_pattern_time; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index fbaa6effd0e3..b19ef58d1555 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -158,13 +158,13 @@ static unsigned int calc_v_total_from_duration( if (duration_in_us > vrr->max_duration_in_us) duration_in_us = vrr->max_duration_in_us; - if (dc_is_hdmi_signal(stream->signal)) { + if (dc_is_hdmi_signal(stream->signal)) { // change for HDMI to comply with spec uint32_t h_total_up_scaled; h_total_up_scaled = stream->timing.h_total * 10000; v_total = div_u64((unsigned long long)duration_in_us * stream->timing.pix_clk_100hz + (h_total_up_scaled - 1), - h_total_up_scaled); + h_total_up_scaled); //ceiling for MMax and MMin for MVRR } else { v_total = div64_u64(div64_u64(((unsigned long long)( duration_in_us) * (stream->timing.pix_clk_100hz / 10)), -- cgit v1.2.3-70-g09d2 From dfe9c3cde22957f65476210aec48e3586983d927 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 13 Mar 2024 15:07:10 +0530 Subject: drm/amdgpu: Do a basic health check before reset Check if the device is present in the bus before trying to recover. It could be that device itself is lost from the bus in some hang situations. 
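The check relies on a PCI property: configuration reads from a device that has dropped off the bus complete with all bits set, which PCI_POSSIBLE_ERROR() tests for. A minimal sketch of that idea follows; the function name is hypothetical and sits outside the amdgpu structures, while the real helper in the patch walks the reset list and reports via dev_err().

#include <linux/errno.h>
#include <linux/pci.h>

/* Return 0 if the device still responds on the bus, -ENODEV otherwise. */
static int example_pci_device_present(struct pci_dev *pdev)
{
	u32 cmd;

	pci_read_config_dword(pdev, PCI_COMMAND, &cmd);
	if (PCI_POSSIBLE_ERROR(cmd))	/* read came back as all 1s */
		return -ENODEV;

	return 0;
}

In the patch below the check runs before the reset domain is locked and is skipped for SR-IOV virtual functions, so a device that has vanished aborts recovery early instead of failing deeper in the reset path.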
Signed-off-by: Lijo Lazar Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5dc24c971b41..efb3b7e74b80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5532,6 +5532,23 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev) } +static int amdgpu_device_health_check(struct list_head *device_list_handle) +{ + struct amdgpu_device *tmp_adev; + int ret = 0; + u32 status; + + list_for_each_entry(tmp_adev, device_list_handle, reset_list) { + pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status); + if (PCI_POSSIBLE_ERROR(status)) { + dev_err(tmp_adev->dev, "device lost from bus!"); + ret = -ENODEV; + } + } + + return ret; +} + /** * amdgpu_device_gpu_recover - reset the asic and recover scheduler * @@ -5603,6 +5620,12 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } + if (!amdgpu_sriov_vf(adev)) { + r = amdgpu_device_health_check(device_list_handle); + if (r) + goto end_reset; + } + /* We need to lock reset domain only once both for XGMI and single device */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); @@ -5768,6 +5791,7 @@ skip_sched_resume: reset_list); amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain); +end_reset: if (hive) { mutex_unlock(&hive->hive_lock); amdgpu_put_xgmi_hive(hive); -- cgit v1.2.3-70-g09d2 From 6bb89d1340425ff030b7bdb0eec53ca6d12d1313 Mon Sep 17 00:00:00 2001 From: Victor Skvortsov Date: Thu, 14 Mar 2024 17:27:46 -0400 Subject: drm/amdgpu: Skip virt_exchange_init on SDMA poison consumption Host will initiate an FLR in SDMA poison consumption scenario. Guest should wait for FLR message to re-init data exchange. Signed-off-by: Victor Skvortsov Reviewed-by: Zhigang Luo Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 77f5b55decf9..a1bad772d932 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -444,7 +444,8 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev, amdgpu_virt_fini_data_exchange(adev); xgpu_nv_send_access_requests_with_param(adev, IDH_RAS_POISON, block, 0, 0); - amdgpu_virt_init_data_exchange(adev); + if (block != AMDGPU_RAS_BLOCK__SDMA) + amdgpu_virt_init_data_exchange(adev); } } -- cgit v1.2.3-70-g09d2 From 2d5bb791e24f43b6b4231b7973009987bbcc9b06 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 14 Feb 2024 16:13:50 -0500 Subject: drm/amd/display: Implement update_planes_and_stream_v3 sequence [WHY & HOW] Update planes and stream version 3 separates FULL and FAST updates to their own sequences. It aims to clean up frequent checks for update type resulting unnecessary branching in logic flow. It also adds a new commit minimal transition sequence, which detects the need for minimal transition based on the actual comparison of current and new states instead of "predicting" it based on per feature software policy, i.e. could_mpcc_tree_change_for_active_pipes. 
The new commit minimal transition sequence is made universal to any power saving features that would use extra free pipes such as Dynamic ODM/MPC Combine, MPO or SubVp. Therefore there is no longer a need to specially handle compatibility problems with transitions among those features as they are now transparent to the new sequence. Reviewed-by: Wenjing Liu Acked-by: Alex Hung Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 701 +++++++++++++++++++++---------- 1 file changed, 469 insertions(+), 232 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 64ab99628125..48190719bf73 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2118,7 +2118,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c return result; } -static bool commit_minimal_transition_state_legacy(struct dc *dc, +static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context); /** @@ -2184,7 +2184,7 @@ enum dc_status dc_commit_streams(struct dc *dc, } if (handle_exit_odm2to1) - res = commit_minimal_transition_state_legacy(dc, dc->current_state); + res = commit_minimal_transition_state(dc, dc->current_state); context = dc_state_create_current_copy(dc); if (!context) @@ -3082,6 +3082,63 @@ static void restore_planes_and_stream_state( *stream->out_transfer_func = scratch->out_transfer_func; } +/** + * update_seamless_boot_flags() - Helper function for updating seamless boot flags + * + * @dc: Current DC state + * @context: New DC state to be programmed + * @surface_count: Number of surfaces that have an updated + * @stream: Corresponding stream to be updated in the current flip + * + * Updating seamless boot flags do not need to be part of the commit sequence. This + * helper function will update the seamless boot flags on each flip (if required) + * outside of the HW commit sequence (fast or slow). + * + * Return: void + */ +static void update_seamless_boot_flags(struct dc *dc, + struct dc_state *context, + int surface_count, + struct dc_stream_state *stream) +{ + if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { + /* Optimize seamless boot flag keeps clocks and watermarks high until + * first flip. After first flip, optimization is required to lower + * bandwidth. Important to note that it is expected UEFI will + * only light up a single display on POST, therefore we only expect + * one stream with seamless boot flag set. + */ + if (stream->apply_seamless_boot_optimization) { + stream->apply_seamless_boot_optimization = false; + + if (get_seamless_boot_stream_count(context) == 0) + dc->optimized_required = true; + } + } +} + +/** + * update_planes_and_stream_state() - The function takes planes and stream + * updates as inputs and determines the appropriate update type. If update type + * is FULL, the function allocates a new context, populates and validates it. + * Otherwise, it updates current dc context. The function will return both + * new_context and new_update_type back to the caller. The function also backs + * up both current and new contexts into corresponding dc state scratch memory. + * TODO: The function does too many things, and even conditionally allocates dc + * context memory implicitly. We should consider to break it down. 
+ * + * @dc: Current DC state + * @srf_updates: an array of surface updates + * @surface_count: surface update count + * @stream: Corresponding stream to be updated + * @stream_update: stream update + * @new_update_type: [out] determined update type by the function + * @new_context: [out] new context allocated and validated if update type is + * FULL, reference to current context if update type is less than FULL. + * + * Return: true if a valid update is populated into new_context, false + * otherwise. + */ static bool update_planes_and_stream_state(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, @@ -3202,6 +3259,7 @@ static bool update_planes_and_stream_state(struct dc *dc, resource_build_test_pattern_params(&context->res_ctx, otg_master); } } + update_seamless_boot_flags(dc, context, surface_count, stream); *new_context = context; *new_update_type = update_type; @@ -4117,6 +4175,10 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, struct dc_state *minimal_transition_context = NULL; unsigned int i, j; + minimal_transition_context = dc_state_create_copy(base_context); + if (!minimal_transition_context) + return NULL; + if (!dc->config.is_vmin_only_asic) { policy->mpc_policy = dc->debug.pipe_split_policy; dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; @@ -4126,10 +4188,6 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, policy->subvp_policy = dc->debug.force_disable_subvp; dc->debug.force_disable_subvp = true; - minimal_transition_context = dc_state_create_copy(base_context); - if (!minimal_transition_context) - return NULL; - /* commit minimal state */ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { for (i = 0; i < minimal_transition_context->stream_count; i++) { @@ -4153,69 +4211,178 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, return minimal_transition_context; } +static bool is_pipe_topology_transition_seamless_with_intermediate_step( + struct dc *dc, + struct dc_state *initial_state, + struct dc_state *intermediate_state, + struct dc_state *final_state) +{ + return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state, + intermediate_state) && + dc->hwss.is_pipe_topology_transition_seamless(dc, + intermediate_state, final_state); +} + +static void swap_and_free_current_context(struct dc *dc, + struct dc_state *new_context, struct dc_stream_state *stream) +{ + + int i; + struct dc_state *old = dc->current_state; + struct pipe_ctx *pipe_ctx; + + /* Since memory free requires elevated IRQ, an interrupt + * request is generated by mem free. If this happens + * between freeing and reassigning the context, our vsync + * interrupt will call into dc and cause a memory + * corruption. Hence, we first reassign the context, + * then free the old context. 
+ */ + dc->current_state = new_context; + dc_state_release(old); + + // clear any forced full updates + for (i = 0; i < dc->res_pool->pipe_count; i++) { + pipe_ctx = &new_context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state && pipe_ctx->stream == stream) + pipe_ctx->plane_state->force_full_update = false; + } +} + +static int initialize_empty_surface_updates( + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates) +{ + struct dc_stream_status *status = dc_stream_get_status(stream); + int i; + + for (i = 0; i < status->plane_count; i++) + srf_updates[i].surface = status->plane_states[i]; + + return status->plane_count; +} + +static bool commit_minimal_transition_based_on_new_context(struct dc *dc, + struct dc_state *new_context, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + int surface_count) +{ + bool success = false; + struct pipe_split_policy_backup policy; + struct dc_state *intermediate_context = + create_minimal_transition_state(dc, new_context, + &policy); + + if (intermediate_context) { + if (is_pipe_topology_transition_seamless_with_intermediate_step( + dc, + dc->current_state, + intermediate_context, + new_context)) { + DC_LOG_DC("commit minimal transition state: base = new state\n"); + commit_planes_for_stream(dc, srf_updates, + surface_count, stream, NULL, + UPDATE_TYPE_FULL, intermediate_context); + swap_and_free_current_context( + dc, intermediate_context, stream); + dc_state_retain(dc->current_state); + success = true; + } + release_minimal_transition_state( + dc, intermediate_context, &policy); + } + return success; +} + +static bool commit_minimal_transition_based_on_current_context(struct dc *dc, + struct dc_state *new_context, struct dc_stream_state *stream) +{ + bool success = false; + struct pipe_split_policy_backup policy; + struct dc_state *intermediate_context; + struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0}; + int surface_count; + + /* + * Both current and new contexts share the same stream and plane state + * pointers. When new context is validated, stream and planes get + * populated with new updates such as new plane addresses. This makes + * the current context no longer valid because stream and planes are + * modified from the original. We backup current stream and plane states + * into scratch space whenever we are populating new context. So we can + * restore the original values back by calling the restore function now. + * This restores back the original stream and plane states associated + * with the current state. + */ + restore_planes_and_stream_state(&dc->current_state->scratch, stream); + intermediate_context = create_minimal_transition_state(dc, + dc->current_state, &policy); + if (intermediate_context) { + if (is_pipe_topology_transition_seamless_with_intermediate_step( + dc, + dc->current_state, + intermediate_context, + new_context)) { + DC_LOG_DC("commit minimal transition state: base = current state\n"); + surface_count = initialize_empty_surface_updates( + stream, srf_updates); + commit_planes_for_stream(dc, srf_updates, + surface_count, stream, NULL, + UPDATE_TYPE_FULL, intermediate_context); + swap_and_free_current_context( + dc, intermediate_context, stream); + dc_state_retain(dc->current_state); + success = true; + } + release_minimal_transition_state(dc, intermediate_context, + &policy); + } + /* + * Restore stream and plane states back to the values associated with + * new context. 
+ */ + restore_planes_and_stream_state(&new_context->scratch, stream); + return success; +} /** - * commit_minimal_transition_state - Commit a minimal state based on current or new context + * commit_minimal_transition_state_in_dc_update - Commit a minimal state based + * on current or new context * * @dc: DC structure, used to get the current state * @context: New context * @stream: Stream getting the update for the flip * - * The function takes in current state and new state and determine a minimal transition state - * as the intermediate step which could make the transition between current and new states - * seamless. If found, it will commit the minimal transition state and update current state to - * this minimal transition state and return true, if not, it will return false. + * The function takes in current state and new state and determine a minimal + * transition state as the intermediate step which could make the transition + * between current and new states seamless. If found, it will commit the minimal + * transition state and update current state to this minimal transition state + * and return true, if not, it will return false. * * Return: * Return True if the minimal transition succeeded, false otherwise */ -static bool commit_minimal_transition_state(struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - bool success = false; - struct dc_state *minimal_transition_context; - struct pipe_split_policy_backup policy; - - /* commit based on new context */ - minimal_transition_context = create_minimal_transition_state(dc, - context, &policy); - if (minimal_transition_context) { - if (dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, minimal_transition_context) && - dc->hwss.is_pipe_topology_transition_seamless( - dc, minimal_transition_context, context)) { - DC_LOG_DC("%s base = new state\n", __func__); - - success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; - } - release_minimal_transition_state(dc, minimal_transition_context, &policy); - } - - if (!success) { - /* commit based on current context */ - restore_planes_and_stream_state(&dc->current_state->scratch, stream); - minimal_transition_context = create_minimal_transition_state(dc, - dc->current_state, &policy); - if (minimal_transition_context) { - if (dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, minimal_transition_context) && - dc->hwss.is_pipe_topology_transition_seamless( - dc, minimal_transition_context, context)) { - DC_LOG_DC("%s base = current state\n", __func__); - success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK; - } - release_minimal_transition_state(dc, minimal_transition_context, &policy); - } - restore_planes_and_stream_state(&context->scratch, stream); - } - - ASSERT(success); +static bool commit_minimal_transition_state_in_dc_update(struct dc *dc, + struct dc_state *new_context, + struct dc_stream_state *stream, + struct dc_surface_update *srf_updates, + int surface_count) +{ + bool success = commit_minimal_transition_based_on_new_context( + dc, new_context, stream, srf_updates, + surface_count); + if (!success) + success = commit_minimal_transition_based_on_current_context(dc, + new_context, stream); + if (!success) + DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n"); return success; } /** - * commit_minimal_transition_state_legacy - Create a transition pipe split state + * 
commit_minimal_transition_state - Create a transition pipe split state * * @dc: Used to get the current state status * @transition_base_context: New transition state @@ -4232,7 +4399,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * Return: * Return false if something is wrong in the transition state. */ -static bool commit_minimal_transition_state_legacy(struct dc *dc, +static bool commit_minimal_transition_state(struct dc *dc, struct dc_state *transition_base_context) { struct dc_state *transition_context; @@ -4316,41 +4483,6 @@ static bool commit_minimal_transition_state_legacy(struct dc *dc, return true; } -/** - * update_seamless_boot_flags() - Helper function for updating seamless boot flags - * - * @dc: Current DC state - * @context: New DC state to be programmed - * @surface_count: Number of surfaces that have an updated - * @stream: Corresponding stream to be updated in the current flip - * - * Updating seamless boot flags do not need to be part of the commit sequence. This - * helper function will update the seamless boot flags on each flip (if required) - * outside of the HW commit sequence (fast or slow). - * - * Return: void - */ -static void update_seamless_boot_flags(struct dc *dc, - struct dc_state *context, - int surface_count, - struct dc_stream_state *stream) -{ - if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) { - /* Optimize seamless boot flag keeps clocks and watermarks high until - * first flip. After first flip, optimization is required to lower - * bandwidth. Important to note that it is expected UEFI will - * only light up a single display on POST, therefore we only expect - * one stream with seamless boot flag set. - */ - if (stream->apply_seamless_boot_optimization) { - stream->apply_seamless_boot_optimization = false; - - if (get_seamless_boot_stream_count(context) == 0) - dc->optimized_required = true; - } - } -} - static void populate_fast_updates(struct dc_fast_update *fast_update, struct dc_surface_update *srf_updates, int surface_count, @@ -4470,123 +4602,9 @@ static bool fast_update_only(struct dc *dc, && !full_update_required(dc, srf_updates, surface_count, stream_update, stream); } -bool dc_update_planes_and_stream(struct dc *dc, +static bool update_planes_and_stream_v1(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update) -{ - struct dc_state *context; - enum surface_update_type update_type; - int i; - struct dc_fast_update fast_update[MAX_SURFACES] = {0}; - - /* In cases where MPO and split or ODM are used transitions can - * cause underflow. Apply stream configuration with minimal pipe - * split first to avoid unsupported transitions for active pipes. 
- */ - bool force_minimal_pipe_splitting = 0; - bool is_plane_addition = 0; - bool is_fast_update_only; - - dc_exit_ips_for_hw_access(dc); - - populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); - is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, - surface_count, stream_update, stream); - force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( - dc, - stream, - srf_updates, - surface_count, - &is_plane_addition); - - /* on plane addition, minimal state is the current one */ - if (force_minimal_pipe_splitting && is_plane_addition && - !commit_minimal_transition_state_legacy(dc, dc->current_state)) - return false; - - if (!update_planes_and_stream_state( - dc, - srf_updates, - surface_count, - stream, - stream_update, - &update_type, - &context)) - return false; - - /* on plane removal, minimal state is the new one */ - if (force_minimal_pipe_splitting && !is_plane_addition) { - if (!commit_minimal_transition_state_legacy(dc, context)) { - dc_state_release(context); - return false; - } - update_type = UPDATE_TYPE_FULL; - } - - if (dc->hwss.is_pipe_topology_transition_seamless && - !dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, context)) { - commit_minimal_transition_state(dc, - context, stream); - } - update_seamless_boot_flags(dc, context, surface_count, stream); - if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { - commit_planes_for_stream_fast(dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); - } else { - if (!stream_update && - dc->hwss.is_pipe_topology_transition_seamless && - !dc->hwss.is_pipe_topology_transition_seamless( - dc, dc->current_state, context)) { - DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); - BREAK_TO_DEBUGGER(); - } - commit_planes_for_stream( - dc, - srf_updates, - surface_count, - stream, - stream_update, - update_type, - context); - } - - if (dc->current_state != context) { - - /* Since memory free requires elevated IRQL, an interrupt - * request is generated by mem free. If this happens - * between freeing and reassigning the context, our vsync - * interrupt will call into dc and cause a memory - * corruption BSOD. Hence, we first reassign the context, - * then free the old context. - */ - - struct dc_state *old = dc->current_state; - - dc->current_state = context; - dc_state_release(old); - - // clear any forced full updates - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->plane_state && pipe_ctx->stream == stream) - pipe_ctx->plane_state->force_full_update = false; - } - } - return true; -} - -void dc_commit_updates_for_stream(struct dc *dc, - struct dc_surface_update *srf_updates, - int surface_count, - struct dc_stream_state *stream, struct dc_stream_update *stream_update, struct dc_state *state) { @@ -4606,35 +4624,13 @@ void dc_commit_updates_for_stream(struct dc *dc, update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); - /* TODO: Since change commit sequence can have a huge impact, - * we decided to only enable it for DCN3x. However, as soon as - * we get more confident about this change we'll need to enable - * the new sequence for all ASICs. - */ - if (dc->ctx->dce_version >= DCN_VERSION_3_2) { - /* - * Previous frame finished and HW is ready for optimization. 
- */ - if (update_type == UPDATE_TYPE_FAST) - dc_post_update_surfaces_to_stream(dc); - - dc_update_planes_and_stream(dc, srf_updates, - surface_count, stream, - stream_update); - return; - } - - if (update_type >= update_surface_trace_level) - update_surface_trace(dc, srf_updates, surface_count); - - if (update_type >= UPDATE_TYPE_FULL) { /* initialize scratch memory for building context */ context = dc_state_create_copy(state); if (context == NULL) { DC_ERROR("Failed to allocate new validate context!\n"); - return; + return false; } for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -4651,7 +4647,6 @@ void dc_commit_updates_for_stream(struct dc *dc, dc_post_update_surfaces_to_stream(dc); } - for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; @@ -4676,13 +4671,12 @@ void dc_commit_updates_for_stream(struct dc *dc, if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) { DC_ERROR("Mode validation failed for stream update!\n"); dc_state_release(context); - return; + return false; } } TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES); - update_seamless_boot_flags(dc, context, surface_count, stream); if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) && !dc->debug.enable_legacy_fast_update) { commit_planes_for_stream_fast(dc, @@ -4723,9 +4717,252 @@ void dc_commit_updates_for_stream(struct dc *dc, dc_post_update_surfaces_to_stream(dc); TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce); } + return true; +} + +static bool update_planes_and_stream_v2(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + struct dc_state *context; + enum surface_update_type update_type; + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + + /* In cases where MPO and split or ODM are used transitions can + * cause underflow. Apply stream configuration with minimal pipe + * split first to avoid unsupported transitions for active pipes. 
+ */ + bool force_minimal_pipe_splitting = 0; + bool is_plane_addition = 0; + bool is_fast_update_only; + + populate_fast_updates(fast_update, srf_updates, surface_count, stream_update); + is_fast_update_only = fast_update_only(dc, fast_update, srf_updates, + surface_count, stream_update, stream); + force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes( + dc, + stream, + srf_updates, + surface_count, + &is_plane_addition); + + /* on plane addition, minimal state is the current one */ + if (force_minimal_pipe_splitting && is_plane_addition && + !commit_minimal_transition_state(dc, dc->current_state)) + return false; + + if (!update_planes_and_stream_state( + dc, + srf_updates, + surface_count, + stream, + stream_update, + &update_type, + &context)) + return false; + + /* on plane removal, minimal state is the new one */ + if (force_minimal_pipe_splitting && !is_plane_addition) { + if (!commit_minimal_transition_state(dc, context)) { + dc_state_release(context); + return false; + } + update_type = UPDATE_TYPE_FULL; + } + + if (dc->hwss.is_pipe_topology_transition_seamless && + !dc->hwss.is_pipe_topology_transition_seamless( + dc, dc->current_state, context)) + commit_minimal_transition_state_in_dc_update(dc, context, stream, + srf_updates, surface_count); + + if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) { + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } else { + if (!stream_update && + dc->hwss.is_pipe_topology_transition_seamless && + !dc->hwss.is_pipe_topology_transition_seamless( + dc, dc->current_state, context)) { + DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n"); + BREAK_TO_DEBUGGER(); + } + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + context); + } + if (dc->current_state != context) + swap_and_free_current_context(dc, context, stream); + return true; +} - return; +static void commit_planes_and_stream_update_on_current_context(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type) +{ + struct dc_fast_update fast_update[MAX_SURFACES] = {0}; + ASSERT(update_type < UPDATE_TYPE_FULL); + populate_fast_updates(fast_update, srf_updates, surface_count, + stream_update); + if (fast_update_only(dc, fast_update, srf_updates, surface_count, + stream_update, stream) && + !dc->debug.enable_legacy_fast_update) + commit_planes_for_stream_fast(dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + dc->current_state); + else + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + dc->current_state); +} + +static void commit_planes_and_stream_update_with_new_context(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + enum surface_update_type update_type, + struct dc_state *new_context) +{ + ASSERT(update_type >= UPDATE_TYPE_FULL); + if (!dc->hwss.is_pipe_topology_transition_seamless(dc, + dc->current_state, new_context)) + /* + * It is required by the feature design that all pipe topologies + * using extra free pipes for power saving purposes such as + * dynamic ODM or SubVp shall only be enabled when it can be + * transitioned seamlessly to AND from its minimal transition + 
* state. A minimal transition state is defined as the same dc + * state but with all power saving features disabled. So it uses + * the minimum pipe topology. When we can't seamlessly + * transition from state A to state B, we will insert the + * minimal transition state A' or B' in between so seamless + * transition between A and B can be made possible. + */ + commit_minimal_transition_state_in_dc_update(dc, new_context, + stream, srf_updates, surface_count); + + commit_planes_for_stream( + dc, + srf_updates, + surface_count, + stream, + stream_update, + update_type, + new_context); +} + +static bool update_planes_and_stream_v3(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + struct dc_state *new_context; + enum surface_update_type update_type; + + /* + * When this function returns true and new_context is not equal to + * current state, the function allocates and validates a new dc state + * and assigns it to new_context. The function expects that the caller + * is responsible to free this memory when new_context is no longer + * used. We swap current with new context and free current instead. So + * new_context's memory will live until the next full update after it is + * replaced by a newer context. Refer to the use of + * swap_and_free_current_context below. + */ + if (!update_planes_and_stream_state(dc, srf_updates, surface_count, + stream, stream_update, &update_type, + &new_context)) + return false; + + if (new_context == dc->current_state) { + commit_planes_and_stream_update_on_current_context(dc, + srf_updates, surface_count, stream, + stream_update, update_type); + } else { + commit_planes_and_stream_update_with_new_context(dc, + srf_updates, surface_count, stream, + stream_update, update_type, new_context); + swap_and_free_current_context(dc, new_context, stream); + } + + return true; +} + +bool dc_update_planes_and_stream(struct dc *dc, + struct dc_surface_update *srf_updates, int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update) +{ + dc_exit_ips_for_hw_access(dc); + /* + * update planes and stream version 3 separates FULL and FAST updates + * to their own sequences. It aims to clean up frequent checks for + * update type resulting unnecessary branching in logic flow. It also + * adds a new commit minimal transition sequence, which detects the need + * for minimal transition based on the actual comparison of current and + * new states instead of "predicting" it based on per feature software + * policy.i.e could_mpcc_tree_change_for_active_pipes. The new commit + * minimal transition sequence is made universal to any power saving + * features that would use extra free pipes such as Dynamic ODM/MPC + * Combine, MPO or SubVp. Therefore there is no longer a need to + * specially handle compatibility problems with transitions among those + * features as they are now transparent to the new sequence. 
+ */ + if (dc->ctx->dce_version > DCN_VERSION_3_51) + return update_planes_and_stream_v3(dc, srf_updates, + surface_count, stream, stream_update); + return update_planes_and_stream_v2(dc, srf_updates, + surface_count, stream, stream_update); +} + +void dc_commit_updates_for_stream(struct dc *dc, + struct dc_surface_update *srf_updates, + int surface_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + struct dc_state *state) +{ + dc_exit_ips_for_hw_access(dc); + /* TODO: Since change commit sequence can have a huge impact, + * we decided to only enable it for DCN3x. However, as soon as + * we get more confident about this change we'll need to enable + * the new sequence for all ASICs. + */ + if (dc->ctx->dce_version > DCN_VERSION_3_51) { + update_planes_and_stream_v3(dc, srf_updates, surface_count, + stream, stream_update); + return; + } + if (dc->ctx->dce_version >= DCN_VERSION_3_2) { + update_planes_and_stream_v2(dc, srf_updates, surface_count, + stream, stream_update); + return; + } + update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, + stream_update, state); } uint8_t dc_get_current_stream_count(struct dc *dc) -- cgit v1.2.3-70-g09d2 From d62d5551dd615f9e488b13595d69b308cd019e16 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 23 Feb 2024 15:06:39 -0500 Subject: drm/amd/display: Backup and restore only on full updates [WHY & HOW] Since the backup and restore for plane and stream states has a significant amount of data to copy, we will change the backup and restore sequence to only take place during full updates. We will also move the scratch memory to struct dc instead of dc_state to avoid needing to allocate large amounts of memory every time we create a new DC state. Reviewed-by: Wenjing Liu Acked-by: Alex Hung Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 10 +- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 1 - drivers/gpu/drm/amd/display/dc/dc.h | 164 ++++++++++++++---------- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 22 ---- 4 files changed, 100 insertions(+), 97 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 48190719bf73..6a65aa623e8a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3162,9 +3162,10 @@ static bool update_planes_and_stream_state(struct dc *dc, } context = dc->current_state; - backup_planes_and_stream_state(&dc->current_state->scratch, stream); update_type = dc_check_update_surfaces_for_stream( dc, srf_updates, surface_count, stream_update, stream_status); + if (update_type == UPDATE_TYPE_FULL) + backup_planes_and_stream_state(&dc->scratch.current_state, stream); /* update current stream with the new updates */ copy_stream_update_to_stream(dc, context, stream, stream_update); @@ -3263,7 +3264,8 @@ static bool update_planes_and_stream_state(struct dc *dc, *new_context = context; *new_update_type = update_type; - backup_planes_and_stream_state(&context->scratch, stream); + if (update_type == UPDATE_TYPE_FULL) + backup_planes_and_stream_state(&dc->scratch.new_state, stream); return true; @@ -4316,7 +4318,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc, * This restores back the original stream and plane states associated * with the current state. 
*/ - restore_planes_and_stream_state(&dc->current_state->scratch, stream); + restore_planes_and_stream_state(&dc->scratch.current_state, stream); intermediate_context = create_minimal_transition_state(dc, dc->current_state, &policy); if (intermediate_context) { @@ -4343,7 +4345,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc, * Restore stream and plane states back to the values associated with * new context. */ - restore_planes_and_stream_state(&new_context->scratch, stream); + restore_planes_and_stream_state(&dc->scratch.new_state, stream); return success; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 5cc7f8da209c..cce4e1c465b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -310,7 +310,6 @@ void dc_state_destruct(struct dc_state *state) memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd)); state->dmub_cmd_count = 0; memset(&state->perf_params, 0, sizeof(state->perf_params)); - memset(&state->scratch, 0, sizeof(state->scratch)); } void dc_state_retain(struct dc_state *state) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 09c6a393642a..9629bd9252b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1004,76 +1004,6 @@ struct dc_current_properties { unsigned int cursor_size_limit; }; -struct dc { - struct dc_debug_options debug; - struct dc_versions versions; - struct dc_caps caps; - struct dc_cap_funcs cap_funcs; - struct dc_config config; - struct dc_bounding_box_overrides bb_overrides; - struct dc_bug_wa work_arounds; - struct dc_context *ctx; - struct dc_phy_addr_space_config vm_pa_config; - - uint8_t link_count; - struct dc_link *links[MAX_PIPES * 2]; - struct link_service *link_srv; - - struct dc_state *current_state; - struct resource_pool *res_pool; - - struct clk_mgr *clk_mgr; - - /* Display Engine Clock levels */ - struct dm_pp_clock_levels sclk_lvls; - - /* Inputs into BW and WM calculations. */ - struct bw_calcs_dceip *bw_dceip; - struct bw_calcs_vbios *bw_vbios; - struct dcn_soc_bounding_box *dcn_soc; - struct dcn_ip_params *dcn_ip; - struct display_mode_lib dml; - - /* HW functions */ - struct hw_sequencer_funcs hwss; - struct dce_hwseq *hwseq; - - /* Require to optimize clocks and bandwidth for added/removed planes */ - bool optimized_required; - bool wm_optimized_required; - bool idle_optimizations_allowed; - bool enable_c20_dtm_b0; - - /* Require to maintain clocks and bandwidth for UEFI enabled HW */ - - /* FBC compressor */ - struct compressor *fbc_compressor; - - struct dc_debug_data debug_data; - struct dpcd_vendor_signature vendor_signature; - - const char *build_id; - struct vm_helper *vm_helper; - - uint32_t *dcn_reg_offsets; - uint32_t *nbio_reg_offsets; - uint32_t *clk_reg_offsets; - - /* Scratch memory */ - struct { - struct { - /* - * For matching clock_limits table in driver with table - * from PMFW. - */ - struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; - } update_bw_bounding_box; - } scratch; - - struct dml2_configuration_options dml2_options; - enum dc_acpi_cm_power_state power_state; -}; - enum frame_buffer_mode { FRAME_BUFFER_MODE_LOCAL_ONLY = 0, FRAME_BUFFER_MODE_ZFB_ONLY, @@ -1363,6 +1293,100 @@ struct dc_plane_info { int layer_index; }; +#include "dc_stream.h" + +struct dc_scratch_space { + /* used to temporarily backup plane states of a stream during + * dc update. 
The reason is that plane states are overwritten + * with surface updates in dc update. Once they are overwritten + * current state is no longer valid. We want to temporarily + * store current value in plane states so we can still recover + * a valid current state during dc update. + */ + struct dc_plane_state plane_states[MAX_SURFACE_NUM]; + struct dc_gamma gamma_correction[MAX_SURFACE_NUM]; + struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM]; + struct dc_3dlut lut3d_func[MAX_SURFACE_NUM]; + struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM]; + struct dc_transfer_func blend_tf[MAX_SURFACE_NUM]; + + struct dc_stream_state stream_state; + struct dc_transfer_func out_transfer_func; +}; + +struct dc { + struct dc_debug_options debug; + struct dc_versions versions; + struct dc_caps caps; + struct dc_cap_funcs cap_funcs; + struct dc_config config; + struct dc_bounding_box_overrides bb_overrides; + struct dc_bug_wa work_arounds; + struct dc_context *ctx; + struct dc_phy_addr_space_config vm_pa_config; + + uint8_t link_count; + struct dc_link *links[MAX_PIPES * 2]; + struct link_service *link_srv; + + struct dc_state *current_state; + struct resource_pool *res_pool; + + struct clk_mgr *clk_mgr; + + /* Display Engine Clock levels */ + struct dm_pp_clock_levels sclk_lvls; + + /* Inputs into BW and WM calculations. */ + struct bw_calcs_dceip *bw_dceip; + struct bw_calcs_vbios *bw_vbios; + struct dcn_soc_bounding_box *dcn_soc; + struct dcn_ip_params *dcn_ip; + struct display_mode_lib dml; + + /* HW functions */ + struct hw_sequencer_funcs hwss; + struct dce_hwseq *hwseq; + + /* Require to optimize clocks and bandwidth for added/removed planes */ + bool optimized_required; + bool wm_optimized_required; + bool idle_optimizations_allowed; + bool enable_c20_dtm_b0; + + /* Require to maintain clocks and bandwidth for UEFI enabled HW */ + + /* FBC compressor */ + struct compressor *fbc_compressor; + + struct dc_debug_data debug_data; + struct dpcd_vendor_signature vendor_signature; + + const char *build_id; + struct vm_helper *vm_helper; + + uint32_t *dcn_reg_offsets; + uint32_t *nbio_reg_offsets; + uint32_t *clk_reg_offsets; + + /* Scratch memory */ + struct { + struct { + /* + * For matching clock_limits table in driver with table + * from PMFW. + */ + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; + } update_bw_bounding_box; + struct dc_scratch_space current_state; + struct dc_scratch_space new_state; + } scratch; + + struct dml2_configuration_options dml2_options; + enum dc_acpi_cm_power_state power_state; + +}; + struct dc_scaling_info { struct rect src_rect; struct rect dst_rect; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index b1b72e688f74..34764094f546 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -522,25 +522,6 @@ struct dc_dmub_cmd { enum dm_dmub_wait_type wait_type; }; -struct dc_scratch_space { - /* used to temporarily backup plane states of a stream during - * dc update. The reason is that plane states are overwritten - * with surface updates in dc update. Once they are overwritten - * current state is no longer valid. We want to temporarily - * store current value in plane states so we can still recover - * a valid current state during dc update. 
- */ - struct dc_plane_state plane_states[MAX_SURFACE_NUM]; - struct dc_gamma gamma_correction[MAX_SURFACE_NUM]; - struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM]; - struct dc_3dlut lut3d_func[MAX_SURFACE_NUM]; - struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM]; - struct dc_transfer_func blend_tf[MAX_SURFACE_NUM]; - - struct dc_stream_state stream_state; - struct dc_transfer_func out_transfer_func; -}; - /** * struct dc_state - The full description of a state requested by users */ @@ -622,9 +603,6 @@ struct dc_state { struct { unsigned int stutter_period_us; } perf_params; - - - struct dc_scratch_space scratch; }; struct replay_context { -- cgit v1.2.3-70-g09d2 From eb004d3ff061a5d3339e5f4981530f40d1212b65 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 25 Feb 2024 14:54:43 -0500 Subject: drm/amd/display: 3.2.275 - Support long vblank feature - Add monitor patch for specific eDP - Init DPPCLK from SMU on dcn32 - Update odm when ODM combine is changed on an otg master pipe with no plane - Fix idle check for shared firmware state - Add guards for idle on reg read/write - Guard cursor idle reallow by DC debug option - Add debug counters to IPS exit prints - Add left edge pixel for YCbCr422/420 + ODM pipe split - Amend coasting vtotal for replay low hz - Refactor DPP into a component directory - Set the power_down_on_boot function pointer to null - Implement update_planes_and_stream_v3 sequence - Lock all enabled otg pipes even with no planes - Implement wait_for_odm_update_pending_complete - Add a dc_state NULL check in dc_state_release - Backup and restore only on full updates - Update DMUB flags and definitions - Return the correct HDCP error code - Add comments to v_total calculation and drop legacy TODO Acked-by: Alex Hung Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 9629bd9252b4..e17ddda8ec38 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.274" +#define DC_VER "3.2.275" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From 54d9490314afd1339f40fb7bd631b82920dc2742 Mon Sep 17 00:00:00 2001 From: Muhammad Ahmed Date: Fri, 23 Feb 2024 12:33:10 -0500 Subject: drm/amd/display: Set the power_down_on_boot function pointer to null [WHY] Blackscreen hang @ PC EF000025 when trying to wake up from S0i3. DCN gets powered off due to dc_power_down_on_boot() being called after timeout. [HOW] Setting the power_down_on_boot function pointer to null since we don't expect the function to be called for APU. 
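Clearing the hook is safe because entry points of this kind are optional and are expected to be called through a NULL check; a generic sketch of that pattern follows, with types and names that are illustrative rather than DC's actual hw_sequencer_funcs.

/* Optional-callback pattern assumed by the change: a NULL pointer simply
 * means "nothing to do" and needs no caller-side special casing. */
struct example_hw_funcs {
	void (*power_down_on_boot)(void *ctx);
};

static void example_power_down_on_boot(const struct example_hw_funcs *funcs,
				       void *ctx)
{
	if (funcs->power_down_on_boot)	/* skip silently when the hook is unset */
		funcs->power_down_on_boot(ctx);
}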
Cc: Mario Limonciello Cc: Alex Deucher Reviewed-by: Nicholas Kazlauskas Acked-by: Alex Hung Signed-off-by: Muhammad Ahmed Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index dce620d359a6..d4e0abbef28e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -39,7 +39,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .program_gamut_remap = dcn30_program_gamut_remap, .init_hw = dcn35_init_hw, - .power_down_on_boot = dcn35_power_down_on_boot, + .power_down_on_boot = NULL, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, -- cgit v1.2.3-70-g09d2 From 2e52dd1ee409bf14aae65b01e084857fedef8a7a Mon Sep 17 00:00:00 2001 From: Ovidiu Bunea Date: Thu, 29 Feb 2024 13:49:04 -0500 Subject: drm/amd/display: Revert "Set the power_down_on_boot function pointer to null" This reverts commit 54d9490314af ("drm/amd/display: Set the power_down_on_boot function pointer to null") [why & how] This commit breaks S0i3 entry because DCN does not enter IPS2. Cc: Mario Limonciello Cc: Alex Deucher Reviewed-by: Nicholas Kazlauskas Reviewed-by: Charlene Liu Acked-by: Wayne Lin Signed-off-by: Ovidiu Bunea Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index d4e0abbef28e..dce620d359a6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -39,7 +39,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .program_gamut_remap = dcn30_program_gamut_remap, .init_hw = dcn35_init_hw, - .power_down_on_boot = NULL, + .power_down_on_boot = dcn35_power_down_on_boot, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = NULL, .program_front_end_for_ctx = dcn20_program_front_end_for_ctx, -- cgit v1.2.3-70-g09d2 From d72e2bdac4ad5422f8d880d323f6ab6a7469cb2b Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 8 Mar 2024 15:56:20 +0530 Subject: drm/amdgpu: add the hw_ip version of all IP's MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add all the IP's version information on a SOC to the devcoredump. 
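The change below prints each discovered IP version as major.minor.revision.variant.subrevision using the IP_VERSION_* macros. A standalone sketch of such a decode follows; the 8/8/8/4/4 bit split is an assumption made for illustration, and the authoritative layout is whatever those macros in the amdgpu headers define.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical decode of one packed IP version word (field widths assumed). */
static void example_print_ip_version(const char *name, uint32_t ver)
{
	printf("HWIP: %s: v%u.%u.%u.%u.%u\n", name,
	       (ver >> 24) & 0xffu,	/* major    */
	       (ver >> 16) & 0xffu,	/* minor    */
	       (ver >>  8) & 0xffu,	/* revision */
	       (ver >>  4) & 0x0fu,	/* variant  */
	        ver        & 0x0fu);	/* subrev   */
}

int main(void)
{
	example_print_ip_version("GC", (11u << 24) | (0u << 16) | (3u << 8));
	return 0;
}

The resulting lines are part of the text a user reads back from the devcoredump node in sysfs after a GPU reset.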
Signed-off-by: Sunil Khatri Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 60 +++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index a0dbccad2f53..1434e9a5506b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -29,6 +29,42 @@ #include "sienna_cichlid.h" #include "smu_v13_0_10.h" +const char *hw_ip_names[MAX_HWIP] = { + [GC_HWIP] = "GC", + [HDP_HWIP] = "HDP", + [SDMA0_HWIP] = "SDMA0", + [SDMA1_HWIP] = "SDMA1", + [SDMA2_HWIP] = "SDMA2", + [SDMA3_HWIP] = "SDMA3", + [SDMA4_HWIP] = "SDMA4", + [SDMA5_HWIP] = "SDMA5", + [SDMA6_HWIP] = "SDMA6", + [SDMA7_HWIP] = "SDMA7", + [LSDMA_HWIP] = "LSDMA", + [MMHUB_HWIP] = "MMHUB", + [ATHUB_HWIP] = "ATHUB", + [NBIO_HWIP] = "NBIO", + [MP0_HWIP] = "MP0", + [MP1_HWIP] = "MP1", + [UVD_HWIP] = "UVD/JPEG/VCN", + [VCN1_HWIP] = "VCN1", + [VCE_HWIP] = "VCE", + [VPE_HWIP] = "VPE", + [DF_HWIP] = "DF", + [DCE_HWIP] = "DCE", + [OSSSYS_HWIP] = "OSSSYS", + [SMUIO_HWIP] = "SMUIO", + [PWR_HWIP] = "PWR", + [NBIF_HWIP] = "NBIF", + [THM_HWIP] = "THM", + [CLK_HWIP] = "CLK", + [UMC_HWIP] = "UMC", + [RSMU_HWIP] = "RSMU", + [XGMI_HWIP] = "XGMI", + [DCI_HWIP] = "DCI", + [PCIE_HWIP] = "PCIE", +}; + int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; @@ -196,6 +232,30 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, coredump->reset_task_info.process_name, coredump->reset_task_info.pid); + /* GPU IP's information of the SOC */ + if (coredump->adev) { + drm_printf(&p, "\nIP Information\n"); + drm_printf(&p, "SOC Family: %d\n", coredump->adev->family); + drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id); + drm_printf(&p, "SOC External Revision id: %d\n", + coredump->adev->external_rev_id); + + for (int i = 1; i < MAX_HWIP; i++) { + for (int j = 0; j < HWIP_MAX_INSTANCE; j++) { + int ver = coredump->adev->ip_versions[i][j]; + + if (ver) + drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n", + hw_ip_names[i], i, j, + IP_VERSION_MAJ(ver), + IP_VERSION_MIN(ver), + IP_VERSION_REV(ver), + IP_VERSION_VARIANT(ver), + IP_VERSION_SUBREV(ver)); + } + } + } + if (coredump->ring) { drm_printf(&p, "\nRing timed out details\n"); drm_printf(&p, "IP Type: %d Ring Name: %s\n", -- cgit v1.2.3-70-g09d2 From 55fdfddddbbc6365a33757bb009be5f64902c019 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 4 Mar 2024 10:36:11 -0700 Subject: drm/amd/display: Remove code duplication [Why & How] Remove redundant code Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 738ee763f24a..bb9f39bf5b5b 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -539,8 +539,6 @@ void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream, mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet); break; case FREESYNC_TYPE_PCON_IN_WHITELIST: - mod_build_adaptive_sync_infopacket_v1(info_packet); - break; case ADAPTIVE_SYNC_TYPE_EDP: 
mod_build_adaptive_sync_infopacket_v1(info_packet); break; -- cgit v1.2.3-70-g09d2 From bcebe44f6bb6d9a85e0710d0086bc3956e6887ba Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 4 Mar 2024 11:40:23 -0700 Subject: drm/amd/display: Remove wrong signal from vrr calculation In some of the merge conflict fixes, one '+' was accidentally left at the beginning of the line. Fortunately, this did not cause any major issues since it acted as a number signal. This commit addresses this issue by removing the extra '+'. Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index b19ef58d1555..d09627c15b9c 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -1057,7 +1057,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->fixed_refresh_in_uhz = 0; refresh_range = div_u64(in_out_vrr->max_refresh_in_uhz + 500000, 1000000) - -+ div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); + div_u64(in_out_vrr->min_refresh_in_uhz + 500000, 1000000); in_out_vrr->supported = true; } -- cgit v1.2.3-70-g09d2 From a63dce3a4ba917a440c8e17de6d808bcea313f7e Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 4 Mar 2024 13:38:02 -0700 Subject: drm/amd/display: Enable 2to1 ODM policy for DCN35 [Why & How] Enable 2to1 ODM policy for DCN35 Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 5d52853cac96..a8f4023ff3b1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -769,7 +769,7 @@ static const struct dc_debug_options debug_defaults_drv = { .support_eDP1_5 = true, .enable_hpo_pg_support = false, .enable_legacy_fast_update = true, - .enable_single_display_2to1_odm_policy = false, + .enable_single_display_2to1_odm_policy = true, .disable_idle_power_optimizations = false, .dmcub_emulation = false, .disable_boot_optimizations = false, -- cgit v1.2.3-70-g09d2 From 86cbadaea4aa468583acb85bb7f33e69f98bb1c3 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Thu, 29 Feb 2024 19:50:52 -0700 Subject: drm/amd/display: Delete duplicated function prototypes [Why & How] dcn32_smu_transfer_wm_table_dram_2_smu is defined twice so one is removed. Also adjust prototype orders. 
Acked-by: Wayne Lin Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h index c76352a817de..5c44ab0e8667 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h @@ -37,10 +37,9 @@ #define DALSMC_Result_OK 0x1 void dcn32_smu_send_fclk_pstate_message(struct clk_mgr_internal *clk_mgr, bool enable); -void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); -void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); void dcn32_smu_send_cab_for_uclk_message(struct clk_mgr_internal *clk_mgr, unsigned int num_ways); void dcn32_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); +void dcn32_smu_set_pme_workaround(struct clk_mgr_internal *clk_mgr); unsigned int dcn32_smu_set_hard_min_by_freq(struct clk_mgr_internal *clk_mgr, uint32_t clk, uint16_t freq_mhz); void dcn32_smu_wait_for_dmub_ack_mclk(struct clk_mgr_internal *clk_mgr, bool enable); -- cgit v1.2.3-70-g09d2 From 34124947837fd91d76312f5fd47b68515fe176ac Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Thu, 29 Feb 2024 19:54:46 -0700 Subject: drm/amd/display: Correct indentations and spaces [Why & How] This fixes indentations and adjust spaces for better readability and code styles. Acked-by: Wayne Lin Signed-off-by: Alex Hung Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 1 + drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 15 ++++---- .../drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h | 42 +++++++++++----------- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 +- drivers/gpu/drm/amd/display/dc/core/dc_stat.c | 2 +- 5 files changed, 32 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 6450853fea94..645a8991a830 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -1731,6 +1731,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1( return 0; } + /** * get_ss_entry_number_from_internal_ss_info_tbl_V3_1 * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 9f0f25aee426..c1a5908b97c8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -329,15 +329,14 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p } break; case AMDGPU_FAMILY_GC_11_0_0: { - struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - - if (clk_mgr == NULL) { - BREAK_TO_DEBUGGER(); - return NULL; - } + struct clk_mgr_internal *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL); - dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); - return &clk_mgr->base; + if (clk_mgr == NULL) { + BREAK_TO_DEBUGGER(); + return NULL; + } + dcn32_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); + return &clk_mgr->base; } case AMDGPU_FAMILY_GC_11_0_1: { diff --git 
a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h index 047d19ea919c..78ca1e5c5e9e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h @@ -37,34 +37,34 @@ typedef enum { } WCK_RATIO_e; typedef struct { - uint32_t FClk; - uint32_t MemClk; - uint32_t Voltage; - uint8_t WckRatio; - uint8_t Spare[3]; + uint32_t FClk; + uint32_t MemClk; + uint32_t Voltage; + uint8_t WckRatio; + uint8_t Spare[3]; } DfPstateTable314_t; //Freq in MHz //Voltage in milli volts with 2 fractional bits typedef struct { - uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; - uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; - uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; - uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - uint32_t VClocks[NUM_VCN_DPM_LEVELS]; - uint32_t DClocks[NUM_VCN_DPM_LEVELS]; - uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; - DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks[NUM_VCN_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; - uint8_t NumDcfClkLevelsEnabled; - uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk - uint8_t NumSocClkLevelsEnabled; - uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk - uint8_t NumDfPstatesEnabled; - uint8_t spare[3]; + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk + uint8_t NumDfPstatesEnabled; + uint8_t spare[3]; - uint32_t MinGfxClk; - uint32_t MaxGfxClk; + uint32_t MinGfxClk; + uint32_t MaxGfxClk; } DpmClocks314_t; struct dcn314_watermarks { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ec4bf9432bdb..96b4f68ec374 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -340,7 +340,7 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc, return res_pool; } -void dc_destroy_resource_pool(struct dc *dc) +void dc_destroy_resource_pool(struct dc *dc) { if (dc) { if (dc->res_pool) @@ -1485,6 +1485,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; const struct rect odm_slice_rec = calculate_odm_slice_in_timing_active(pipe_ctx); bool res = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Invalid input */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c index 5f6392ae31a6..cd6570a1e20e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c @@ -61,7 +61,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification /* For HPD/HPD RX, convert dpia port index into link index */ if (notify->type == DMUB_NOTIFICATION_HPD || notify->type == DMUB_NOTIFICATION_HPD_IRQ || - notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || + notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION || notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) { notify->link_index = 
get_link_index_from_dpia_port_index(dc, notify->link_index); -- cgit v1.2.3-70-g09d2 From 40a2a2b257db81b42827adf6a7aaa24a7d86460a Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 4 Mar 2024 13:52:58 -0700 Subject: drm/amd/display: Add the MALL size in the fallback function [Why & How] If the driver has issues retrieving the MALL size for the specific hardware, it might fail since the current value is set to zero. This commit addresses this issue by adding a simple constant value to give the drive a chance to start. Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 3 ++- drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index ce1754cc1f46..1c657fe4a9bb 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2048,7 +2048,8 @@ static bool dcn32_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 296a0a8e7145..6c8129734163 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1697,7 +1697,9 @@ static bool dcn321_resource_construct( dc->caps.min_horizontal_blanking_period = 80; dc->caps.dmdata_alloc_size = 2048; dc->caps.mall_size_per_mem_channel = 4; - dc->caps.mall_size_total = 0; + /* total size = mall per channel * num channels * 1024 * 1024 */ + dc->caps.mall_size_total = dc->caps.mall_size_per_mem_channel * dc->ctx->dc_bios->vram_info.num_chans * 1048576; + dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8; dc->caps.cache_line_size = 64; dc->caps.cache_num_ways = 16; -- cgit v1.2.3-70-g09d2 From 96ab4b2ebb8564093ee09b932a4e181aa7b0f258 Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Wed, 6 Mar 2024 16:40:35 +0800 Subject: drm/amd/pm: Update SMUv13.0.6 PMFW headers Update PMFW interface headers for updated metrics table with pcie link speed and pcie link width Signed-off-by: Asad Kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h index 7b812b9994d7..0b3c2f54a343 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h @@ -123,7 +123,7 @@ typedef enum { VOLTAGE_GUARDBAND_COUNT } GFX_GUARDBAND_e; -#define SMU_METRICS_TABLE_VERSION 0xB +#define 
SMU_METRICS_TABLE_VERSION 0xC typedef struct __attribute__((packed, aligned(4))) { uint32_t AccumulationCounter; @@ -223,6 +223,10 @@ typedef struct __attribute__((packed, aligned(4))) { // VCN/JPEG ACTIVITY uint32_t VcnBusy[4]; uint32_t JpegBusy[32]; + + // PCIE LINK Speed and width + uint32_t PCIeLinkSpeed; + uint32_t PCIeLinkWidth; } MetricsTableX_t; typedef struct __attribute__((packed, aligned(4))) { -- cgit v1.2.3-70-g09d2 From f37e5d216e22b397310f1f721e61a86b1b38bad7 Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Mon, 11 Mar 2024 14:03:10 +0800 Subject: drm/amd/pm: Use metric table for pcie speed/width Report pcie link speed/width using metric table in case of one vf & if pmfw support is available, else report directly from registers in case of pf. Skip reporting it for other cases. v2: Skip multi-vf check(Lijo) Signed-off-by: Asad Kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 3957af057d54..4e178916e845 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2228,7 +2228,15 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->gfxclk_lock_status = GET_METRIC_FIELD(GfxLockXCDMak) >> GET_INST(GC, 0); if (!(adev->flags & AMD_IS_APU)) { - if (!amdgpu_sriov_vf(adev)) { + /*Check smu version, PCIE link speed and width will be reported from pmfw metric + * table for both pf & one vf for smu version 85.99.0 or higher else report only + * for pf from registers + */ + if (smu->smc_fw_version >= 0x556300) { + gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth; + gpu_metrics->pcie_link_speed = + pcie_gen_to_speed(metrics_x->PCIeLinkSpeed); + } else if (!amdgpu_sriov_vf(adev)) { link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu); if (link_width_level > MAX_LINK_WIDTH) link_width_level = 0; @@ -2238,6 +2246,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table gpu_metrics->pcie_link_speed = smu_v13_0_6_get_current_pcie_link_speed(smu); } + gpu_metrics->pcie_bandwidth_acc = SMUQ10_ROUND(metrics_x->PcieBandwidthAcc[0]); gpu_metrics->pcie_bandwidth_inst = -- cgit v1.2.3-70-g09d2 From 26d97182bb519df6e528f45df72a2f0ac77cb850 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 13 Mar 2024 12:48:57 -0400 Subject: drm/amdkfd: Rename read_doorbell_id in MQD functions Rename read_doorbell_id function to a more meaningful name, implying what it is used for. No functional change. 
Suggested-by: Jay Cornwall Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 4 ++-- 7 files changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f4d395e38683..1ce398ab0b3d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1997,7 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, * check those fields */ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; - if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) { + if (mqd_mgr->check_preemption_failed(dqm->packet_mgr.priv_queue->queue->mqd)) { dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n"); while (halt_if_hws_hang) schedule(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index e5cc697a3ca8..ba3eebb2ca6d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -119,7 +119,7 @@ struct mqd_manager { #if defined(CONFIG_DEBUG_FS) int (*debugfs_show_mqd)(struct seq_file *m, void *data); #endif - uint32_t (*read_doorbell_id)(void *mqd); + uint32_t (*check_preemption_failed)(void *mqd); uint64_t (*mqd_stride)(struct mqd_manager *mm, struct queue_properties *p); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 1a4a69943c71..8f9f56f7a8b0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -206,7 +206,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static uint32_t check_preemption_failed(void *mqd) { struct cik_mqd *m = (struct cik_mqd *)mqd; @@ -423,7 +423,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; break; case KFD_MQD_TYPE_DIQ: mqd->allocate_mqd = allocate_mqd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 22cbfa1bdadd..d4cf7d845928 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -224,7 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static uint32_t check_preemption_failed(void *mqd) { struct v10_compute_mqd *m = (struct v10_compute_mqd *)mqd; @@ -488,7 +488,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; pr_debug("%s@%i\n", __func__, 
__LINE__); break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 826bc4f6c8a7..2b9f57c267eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -278,7 +278,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static uint32_t check_preemption_failed(void *mqd) { struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd; @@ -517,7 +517,7 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; pr_debug("%s@%i\n", __func__, __LINE__); break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 697b6d530d12..7c93a0932677 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -316,7 +316,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, } -static uint32_t read_doorbell_id(void *mqd) +static uint32_t check_preemption_failed(void *mqd) { struct v9_mqd *m = (struct v9_mqd *)mqd; @@ -881,7 +881,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) { mqd->init_mqd = init_mqd_hiq_v9_4_3; mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 3e1a574d4ea6..dbc868e0363f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -237,7 +237,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t read_doorbell_id(void *mqd) +static uint32_t check_preemption_failed(void *mqd) { struct vi_mqd *m = (struct vi_mqd *)mqd; @@ -482,7 +482,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->read_doorbell_id = read_doorbell_id; + mqd->check_preemption_failed = check_preemption_failed; break; case KFD_MQD_TYPE_DIQ: mqd->allocate_mqd = allocate_mqd; -- cgit v1.2.3-70-g09d2 From 0991a4c1929cac4c28d368be738bceb44dfde5f6 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 13 Mar 2024 20:26:55 -0400 Subject: drm/amdkfd: Check preemption status on all XCDs This patch adds the following functionality: - Check the queue preemption status on all XCDs in a partition for GFX 9.4.3. - Update the queue preemption debug message to print the queue doorbell id for which preemption failed. - Change the signature of check preemption failed function to return a bool instead of uint32_t and pass the MQD manager as an argument. 
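For context, once the kfd_check_hiq_mqd_doorbell_id() helper introduced below is in place, a preemption timeout is reported per XCC instance roughly as follows (device prefix omitted, doorbell id value purely illustrative):

	XCC 1: Queue preemption failed for queue with doorbell_id: 8

rather than the previous single generic "HIQ MQD's queue_doorbell_id0 is not 0" message, which gave no hint of the affected instance or queue.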
Suggested-by: Jay Cornwall Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 3 +-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 18 ++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 4 +++- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 25 +++++++++++++++++++--- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 4 ++-- 8 files changed, 52 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 1ce398ab0b3d..151fabf84040 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1997,8 +1997,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, * check those fields */ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; - if (mqd_mgr->check_preemption_failed(dqm->packet_mgr.priv_queue->queue->mqd)) { - dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n"); + if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) { while (halt_if_hws_hang) schedule(); return -ETIME; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 050a6936ff84..8746a61a852d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -290,3 +290,21 @@ uint64_t kfd_mqd_stride(struct mqd_manager *mm, { return mm->mqd_size; } + +bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id, + uint32_t inst) +{ + if (doorbell_id) { + struct device *dev = node->adev->dev; + + if (node->adev->xcp_mgr && node->adev->xcp_mgr->num_xcps > 0) + dev_err(dev, "XCC %d: Queue preemption failed for queue with doorbell_id: %x\n", + inst, doorbell_id); + else + dev_err(dev, "Queue preemption failed for queue with doorbell_id: %x\n", + doorbell_id); + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index ba3eebb2ca6d..17cc1f25c8d0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -119,7 +119,7 @@ struct mqd_manager { #if defined(CONFIG_DEBUG_FS) int (*debugfs_show_mqd)(struct seq_file *m, void *data); #endif - uint32_t (*check_preemption_failed)(void *mqd); + bool (*check_preemption_failed)(struct mqd_manager *mm, void *mqd); uint64_t (*mqd_stride)(struct mqd_manager *mm, struct queue_properties *p); @@ -198,4 +198,6 @@ void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev); uint64_t kfd_mqd_stride(struct mqd_manager *mm, struct queue_properties *q); +bool kfd_check_hiq_mqd_doorbell_id(struct kfd_node *node, uint32_t doorbell_id, + uint32_t inst); #endif /* KFD_MQD_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 8f9f56f7a8b0..05f3ac2eaef9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -206,11 +206,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t 
check_preemption_failed(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct cik_mqd *m = (struct cik_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static void update_mqd(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index d4cf7d845928..2eff37aaf827 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -224,11 +224,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t check_preemption_failed(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct v10_compute_mqd *m = (struct v10_compute_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static int get_wave_state(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 2b9f57c267eb..68dbc0399c87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -278,11 +278,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t check_preemption_failed(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static int get_wave_state(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 7c93a0932677..6bddc16808d7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -316,11 +316,11 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, } -static uint32_t check_preemption_failed(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct v9_mqd *m = (struct v9_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static int get_wave_state(struct mqd_manager *mm, void *mqd, @@ -607,6 +607,24 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, return err; } +static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd) +{ + uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + uint32_t xcc_mask = mm->dev->xcc_mask; + int inst = 0, xcc_id; + struct v9_mqd *m; + bool ret = false; + + for_each_inst(xcc_id, xcc_mask) { + m = get_mqd(mqd + hiq_mqd_size * inst); + ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev, + m->queue_doorbell_id0, inst); + ++inst; + } + + return ret; +} + static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj, struct kfd_mem_obj *xcc_mqd_mem_obj, uint64_t offset) @@ -881,15 +899,16 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif - mqd->check_preemption_failed = check_preemption_failed; if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) { mqd->init_mqd = init_mqd_hiq_v9_4_3; mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3; mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3; + mqd->check_preemption_failed = 
check_preemption_failed_v9_4_3; } else { mqd->init_mqd = init_mqd_hiq; mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->destroy_mqd = destroy_hiq_mqd; + mqd->check_preemption_failed = check_preemption_failed; } break; case KFD_MQD_TYPE_DIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index dbc868e0363f..c1fafc502515 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -237,11 +237,11 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd, q->is_active = QUEUE_IS_ACTIVE(*q); } -static uint32_t check_preemption_failed(void *mqd) +static bool check_preemption_failed(struct mqd_manager *mm, void *mqd) { struct vi_mqd *m = (struct vi_mqd *)mqd; - return m->queue_doorbell_id0; + return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0); } static void update_mqd(struct mqd_manager *mm, void *mqd, -- cgit v1.2.3-70-g09d2 From c59b2767e06ce6448291d71bb44a0821755c8573 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 5 Mar 2024 11:28:15 -0700 Subject: drm/amd/display: Move define to the proper header [Why & How] DCN3_16_MIN_COMPBUF_SIZE_KB is defined in the dcn316_resource.c file. This header fit better in the dcn31_fpu.h together with similar defines. Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h | 1 + drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index 8f9c8faed260..d2ae43a82ba5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -30,6 +30,7 @@ #define DCN3_15_DEFAULT_DET_SIZE 192 #define DCN3_15_MIN_COMPBUF_SIZE_KB 128 #define DCN3_16_DEFAULT_DET_SIZE 192 +#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes, int pipe_cnt); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index b9753d4606f8..83a71f1b933d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -125,7 +125,6 @@ #include "link_enc_cfg.h" #define DCN3_16_MAX_DET_SIZE 384 -#define DCN3_16_MIN_COMPBUF_SIZE_KB 128 #define DCN3_16_CRB_SEGMENT_SIZE_KB 64 enum dcn31_clk_src_array_id { -- cgit v1.2.3-70-g09d2 From 119badce8bfdfd059468b9e78694333579713570 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 7 Mar 2024 18:06:58 -0700 Subject: drm/amd/display: Enable fast update for DCN314 [Why & How] Enable legacy fast update for DCN314 Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index c97391edb5ff..9bd0d72e74c7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -925,6 +925,7 @@ static 
const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = true, + .enable_legacy_fast_update = true, .using_dml2 = false, }; -- cgit v1.2.3-70-g09d2 From 75e77fb7697f2ecb99861924a0b77a0303927ca2 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 7 Mar 2024 18:17:31 -0700 Subject: drm/amd/display: Remove legacy code [Why & How] Remove legacy code which is unnecessary. Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/resource/dcn314/dcn314_resource.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index 9bd0d72e74c7..3bae606ed700 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -929,24 +929,6 @@ static const struct dc_debug_options debug_defaults_drv = { .using_dml2 = false, }; -static const struct dc_debug_options debug_defaults_diags = { - .disable_dmcu = true, - .force_abm_enable = false, - .timing_trace = true, - .clock_trace = true, - .disable_dpp_power_gate = true, - .disable_hubp_power_gate = true, - .disable_clock_gate = true, - .disable_pplib_clock_request = true, - .disable_pplib_wm_range = true, - .disable_stutter = false, - .scl_reset_length10 = true, - .dwb_fi_phase = -1, // -1 = disable - .dmub_command_table = true, - .enable_tri_buf = true, - .use_max_lb = true -}; - static const struct dc_panel_config panel_config_defaults = { .psr = { .disable_psr = false, @@ -1939,8 +1921,6 @@ static bool dcn314_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; - else - dc->debug = debug_defaults_diags; /* Disable pipe power gating */ dc->debug.disable_dpp_power_gate = true; -- cgit v1.2.3-70-g09d2 From 3a13d1fddaf51b98cdba20b486cb8fd6080b71b7 Mon Sep 17 00:00:00 2001 From: Sherry Wang Date: Wed, 7 Sep 2022 00:12:44 +0800 Subject: drm/amd/display: correct hostvm flag [Why] Hostvm should be enabled/disabled accordding to the status of riommu_active, but hostvm always be disabled on DCN31 which causes underflow [How] Set correct hostvm flag on DCN31 Acked-by: Wayne Lin Signed-off-by: Sherry Wang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 04d142f97474..2fb1d00ff965 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -892,7 +892,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, .enable_legacy_fast_update = true, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ - .dml_hostvm_override = DML_HOSTVM_OVERRIDE_FALSE, + .dml_hostvm_override = DML_HOSTVM_NO_OVERRIDE, .using_dml2 = false, }; -- cgit v1.2.3-70-g09d2 From 713537e3b9fa7dff4557474061cb11266e0a6854 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 7 Mar 2024 18:27:00 -0700 Subject: drm/amd/display: Comments adjustments [Why & How] This commit just drop some old comments and update a typo in 
another one. Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 2fb1d00ff965..ce3699f62602 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -75,7 +75,6 @@ #include "dcn30/dcn30_dwb.h" #include "dcn30/dcn30_mmhubbub.h" -// TODO: change include headers /amd/include/asic_reg after upstream #include "yellow_carp_offset.h" #include "dcn/dcn_3_1_2_offset.h" #include "dcn/dcn_3_1_2_sh_mask.h" @@ -1771,7 +1770,7 @@ bool dcn31_validate_bandwidth(struct dc *dc, out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); - // Disable fast_validate to set min dcfclk in alculate_wm_and_dlg + // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) fast_validate = false; -- cgit v1.2.3-70-g09d2 From 0ba7ad7e42ea9d38c39d98137af3ad6a18f2ee8f Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 11 Mar 2024 17:20:01 -0600 Subject: drm/amd/display: Add missing registers and offset [Why & How] Registers and offset are missing. Add it back Acked-by: Wayne Lin Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/include/asic_reg/dcn/dcn_3_2_1_offset.h | 37 +++++++++++++++++++++- .../amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h | 16 ++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h index 3bd8792fd7b3..a04b8c32c564 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h @@ -1719,6 +1719,10 @@ #define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2 #define regFMON_CTRL 0x0541 #define regFMON_CTRL_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542 +#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543 +#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dchubbubl_hubbub_sdpif_dispdec @@ -3573,6 +3577,10 @@ #define regCM0_CM_DEALPHA_BASE_IDX 2 #define regCM0_CM_COEF_FORMAT 0x0d8c #define regCM0_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d +#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e +#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec @@ -3959,6 +3967,10 @@ #define regCM1_CM_DEALPHA_BASE_IDX 2 #define regCM1_CM_COEF_FORMAT 0x0ef7 #define regCM1_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8 +#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9 +#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec @@ -4345,6 +4357,10 @@ #define regCM2_CM_DEALPHA_BASE_IDX 2 #define regCM2_CM_COEF_FORMAT 0x1062 #define regCM2_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_INDEX 0x1063 +#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_DATA 0x1064 +#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // 
addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec @@ -4731,6 +4747,10 @@ #define regCM3_CM_DEALPHA_BASE_IDX 2 #define regCM3_CM_COEF_FORMAT 0x11cd #define regCM3_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce +#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_DATA 0x11cf +#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec @@ -11789,6 +11809,10 @@ #define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b +#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec @@ -11897,6 +11921,10 @@ #define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097 +#define regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc1_dispdec_dsccif_dispdec @@ -12005,7 +12033,10 @@ #define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 - +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc2_dispdec_dsccif_dispdec // base address: 0x2e0 @@ -12113,6 +12144,10 @@ #define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f +#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 // addressBlock: dce_dc_dsc3_dispdec_dsccif_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h index e82dffc2b9b0..ce773fca621f 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h @@ -11547,6 +11547,11 @@ #define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L // addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec @@ -42315,6 +42320,15 @@ //DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0 #define 
DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL +//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L // addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec @@ -42348,7 +42362,9 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L // addressBlock: dce_dc_dsc1_dispdec_dscc_dispdec -- cgit v1.2.3-70-g09d2 From b04c21abe21ff56f7a4da8a53c2779575192ffaf Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 20 Feb 2024 14:54:46 -0500 Subject: drm/amd/display: skip forcing odm in minimal transition [why] In minial transitions state, ODM combine shouldn't be forced as it will make transition non seamless. The force ODM debug option is to control the end result not the intermediate transition. So we can temporarily disable ODM forcing when committing minimal transition state. [how] Backup stream ODM forcing option and clear it in minimal transition state. Once minimal transition state is released, we will restore the original debug option back. 
Reviewed-by: Samson Tam Acked-by: Wayne Lin Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 97 +++++++++++++++++++++----------- 1 file changed, 65 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6a65aa623e8a..0757207e7179 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4158,55 +4158,88 @@ struct pipe_split_policy_backup { bool dynamic_odm_policy; bool subvp_policy; enum pipe_split_policy mpc_policy; + char force_odm[MAX_PIPES]; }; -static void release_minimal_transition_state(struct dc *dc, - struct dc_state *context, struct pipe_split_policy_backup *policy) +static void backup_and_set_minimal_pipe_split_policy(struct dc *dc, + struct dc_state *context, + struct pipe_split_policy_backup *policy) { - dc_state_release(context); - /* restore previous pipe split and odm policy */ + int i; + + if (!dc->config.is_vmin_only_asic) { + policy->mpc_policy = dc->debug.pipe_split_policy; + dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; + } + policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; + dc->debug.enable_single_display_2to1_odm_policy = false; + policy->subvp_policy = dc->debug.force_disable_subvp; + dc->debug.force_disable_subvp = true; + for (i = 0; i < context->stream_count; i++) { + policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments; + context->streams[i]->debug.force_odm_combine_segments = 0; + } +} + +static void restore_minimal_pipe_split_policy(struct dc *dc, + struct dc_state *context, + struct pipe_split_policy_backup *policy) +{ + uint8_t i; + if (!dc->config.is_vmin_only_asic) dc->debug.pipe_split_policy = policy->mpc_policy; - dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy; + dc->debug.enable_single_display_2to1_odm_policy = + policy->dynamic_odm_policy; dc->debug.force_disable_subvp = policy->subvp_policy; + for (i = 0; i < context->stream_count; i++) + context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i]; +} + +static void release_minimal_transition_state(struct dc *dc, + struct dc_state *minimal_transition_context, + struct dc_state *base_context, + struct pipe_split_policy_backup *policy) +{ + restore_minimal_pipe_split_policy(dc, base_context, policy); + dc_state_release(minimal_transition_context); + /* restore previous pipe split and odm policy */ +} + +static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context) +{ + uint8_t i; + int j; + struct dc_stream_status *stream_status; + + for (i = 0; i < context->stream_count; i++) { + stream_status = &context->stream_status[i]; + + for (j = 0; j < stream_status->plane_count; j++) + stream_status->plane_states[j]->flip_immediate = false; + } } static struct dc_state *create_minimal_transition_state(struct dc *dc, struct dc_state *base_context, struct pipe_split_policy_backup *policy) { struct dc_state *minimal_transition_context = NULL; - unsigned int i, j; minimal_transition_context = dc_state_create_copy(base_context); if (!minimal_transition_context) return NULL; - if (!dc->config.is_vmin_only_asic) { - policy->mpc_policy = dc->debug.pipe_split_policy; - dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; - } - policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy; - dc->debug.enable_single_display_2to1_odm_policy = false; - 
policy->subvp_policy = dc->debug.force_disable_subvp; - dc->debug.force_disable_subvp = true; - + backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); /* commit minimal state */ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) { - for (i = 0; i < minimal_transition_context->stream_count; i++) { - struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i]; - - for (j = 0; j < stream_status->plane_count; j++) { - struct dc_plane_state *plane_state = stream_status->plane_states[j]; - - /* force vsync flip when reconfiguring pipes to prevent underflow - * and corruption - */ - plane_state->flip_immediate = false; - } - } + /* prevent underflow and corruption when reconfiguring pipes */ + force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { - /* this should never happen */ - release_minimal_transition_state(dc, minimal_transition_context, policy); + /* + * This should never happen, minimal transition state should + * always be validated first before adding pipe split features. + */ + release_minimal_transition_state(dc, minimal_transition_context, base_context, policy); BREAK_TO_DEBUGGER(); minimal_transition_context = NULL; } @@ -4293,7 +4326,7 @@ static bool commit_minimal_transition_based_on_new_context(struct dc *dc, success = true; } release_minimal_transition_state( - dc, intermediate_context, &policy); + dc, intermediate_context, new_context, &policy); } return success; } @@ -4339,7 +4372,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc, success = true; } release_minimal_transition_state(dc, intermediate_context, - &policy); + dc->current_state, &policy); } /* * Restore stream and plane states back to the values associated with @@ -4466,7 +4499,7 @@ static bool commit_minimal_transition_state(struct dc *dc, transition_base_context, &policy); if (transition_context) { ret = dc_commit_state_no_check(dc, transition_context); - release_minimal_transition_state(dc, transition_context, &policy); + release_minimal_transition_state(dc, transition_context, transition_base_context, &policy); } if (ret != DC_OK) { -- cgit v1.2.3-70-g09d2 From 77357f9387bea5efe99b2d2f99c1f29cf6dc22ec Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Thu, 7 Mar 2024 16:19:00 +0800 Subject: drm/amdgpu: add VCN sensor value for SMU 13.0.4 This will add VCN sensor value for SMU 13.0.4. 
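For illustration only, a minimal sketch of how another part of the driver could consume the newly wired-up sensor through the generic DPM helper; it relies on the existing amdgpu_dpm_read_sensor() interface and is not part of this patch:

	/* hypothetical caller, not from this patch */
	uint32_t vcn_load;
	int size = sizeof(vcn_load);

	/* routed to smu_v13_0_4_read_sensor() on SMU 13.0.4 parts */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD,
				    (void *)&vcn_load, &size))
		dev_info(adev->dev, "VCN load: %u%%\n", vcn_load);

Dividing UvdActivity by 100 mirrors the GfxActivity handling visible in the same hunk, which suggests the value reported is a percentage.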
Signed-off-by: Xiaojian Du Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index bb98156b2fa1..e8119918ef6b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -318,7 +318,7 @@ static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_AVERAGE_SOCKETPOWER: *value = (metrics->AverageSocketPower << 8) / 1000; @@ -572,6 +572,12 @@ static int smu_v13_0_4_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_4_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v13_0_4_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, -- cgit v1.2.3-70-g09d2 From dc5c3d48e9f3a2a708b28bc987b364b0eb963341 Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Fri, 8 Mar 2024 15:57:49 +0800 Subject: drm/admgpu: add vclk and dclk sysfs node for some ASICs This will add vclk and dclk sysfs node for some ASICs. Signed-off-by: Xiaojian Du Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index efc631bddf4a..c90f5972e03e 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2289,9 +2289,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || gc_ver == IP_VERSION(10, 3, 0) || gc_ver == IP_VERSION(10, 1, 2) || gc_ver == IP_VERSION(11, 0, 0) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0) || gc_ver == IP_VERSION(11, 0, 2) || gc_ver == IP_VERSION(11, 0, 3) || gc_ver == IP_VERSION(9, 4, 3))) @@ -2304,9 +2310,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || gc_ver == IP_VERSION(10, 3, 0) || gc_ver == IP_VERSION(10, 1, 2) || gc_ver == IP_VERSION(11, 0, 0) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0) || gc_ver == IP_VERSION(11, 0, 2) || gc_ver == IP_VERSION(11, 0, 3) || gc_ver == IP_VERSION(9, 4, 3))) -- cgit v1.2.3-70-g09d2 From ab66c832847fcdffc97d4591ba5547e3990d9d33 Mon Sep 17 00:00:00 2001 From: Zhigang Luo Date: Thu, 29 Feb 2024 16:04:35 -0500 Subject: drm/amdgpu: trigger flr_work if reading pf2vf data failed if reading pf2vf data failed 30 times continuously, it means something is wrong. Need to trigger flr_work to recover the issue. 
also use dev_err to print the error message to get which device has issue and add warning message if waiting IDH_FLR_NOTIFICATION_CMPL timeout. Signed-off-by: Zhigang Luo Acked-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 29 ++++++++++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3 +++ drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 2 ++ drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 2 ++ 5 files changed, 41 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index efb3b7e74b80..3204b8f6edeb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -143,6 +143,8 @@ const char *amdgpu_asic_name[] = { "LAST", }; +static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev); + /** * DOC: pcie_replay_count * @@ -4968,6 +4970,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, retry: amdgpu_amdkfd_pre_reset(adev); + amdgpu_device_stop_pending_resets(adev); + if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else @@ -5708,11 +5712,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_adev->asic_reset_res = r; } - /* - * Drop all pending non scheduler resets. Scheduler resets - * were already dropped during drm_sched_stop - */ - amdgpu_device_stop_pending_resets(tmp_adev); + if (!amdgpu_sriov_vf(tmp_adev)) + /* + * Drop all pending non scheduler resets. Scheduler resets + * were already dropped during drm_sched_stop + */ + amdgpu_device_stop_pending_resets(tmp_adev); } /* Actual ASIC resets if needed.*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 7a4eae36778a..aed60aaf1a55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -32,6 +32,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" +#include "amdgpu_reset.h" #include "vi.h" #include "soc15.h" #include "nv.h" @@ -424,7 +425,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) return -EINVAL; if (pf2vf_info->size > 1024) { - DRM_ERROR("invalid pf2vf message size\n"); + dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size); return -EINVAL; } @@ -435,7 +436,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, adev->virt.fw_reserve.checksum_key, checksum); if (checksum != checkval) { - DRM_ERROR("invalid pf2vf message\n"); + dev_err(adev->dev, + "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n", + checksum, checkval); return -EINVAL; } @@ -449,7 +452,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, 0, checksum); if (checksum != checkval) { - DRM_ERROR("invalid pf2vf message\n"); + dev_err(adev->dev, + "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n", + checksum, checkval); return -EINVAL; } @@ -485,7 +490,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; break; default: - DRM_ERROR("invalid pf2vf version\n"); + dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version); return -EINVAL; } @@ -584,8 +589,21 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) int ret; 
ret = amdgpu_virt_read_pf2vf_data(adev); - if (ret) + if (ret) { + adev->virt.vf2pf_update_retry_cnt++; + if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && + amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) { + if (amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work)) + return; + else + dev_err(adev->dev, "Failed to queue work! at %s", __func__); + } + goto out; + } + + adev->virt.vf2pf_update_retry_cnt = 0; amdgpu_virt_write_vf2pf_data(adev); out: @@ -606,6 +624,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; + adev->virt.vf2pf_update_retry_cnt = 0; if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) { DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 3f59b7b5523f..a858bc98cad4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -52,6 +52,8 @@ /* tonga/fiji use this offset */ #define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 +#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 30 + enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, SRIOV_VF_MODE_ONE_VF, @@ -257,6 +259,7 @@ struct amdgpu_virt { /* vf2pf message */ struct delayed_work vf2pf_work; uint32_t vf2pf_update_interval_ms; + int vf2pf_update_retry_cnt; /* multimedia bandwidth config */ bool is_mm_bw_enabled; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index a2bd2c3b1ef9..0c7275bca8f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -276,6 +276,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) timeout -= 10; } while (timeout > 1); + dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + flr_done: atomic_set(&adev->reset_domain->in_gpu_reset, 0); up_write(&adev->reset_domain->sem); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index a1bad772d932..89992c1c9a62 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -309,6 +309,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) timeout -= 10; } while (timeout > 1); + dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + flr_done: atomic_set(&adev->reset_domain->in_gpu_reset, 0); up_write(&adev->reset_domain->sem); -- cgit v1.2.3-70-g09d2 From d5586e2f5367829880932c8f057f4df9445056cc Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Tue, 27 Feb 2024 16:28:01 -0500 Subject: drm/amd/display: Add debug option for idle reg checks [Why] Leave disabled by default due to sequencing issues around power states where these flags aren't properly reset. [How] Allow re-enabling from DC debug option. 
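The dc.h hunk below only introduces the flag; its consumer is not part of this change. As a rough, hypothetical sketch of how a dc_debug_options flag like this is normally consulted (the helper name is illustrative, not an existing function):

	/* hypothetical call site -- only dc->debug.enable_idle_reg_checks exists */
	if (dc->debug.enable_idle_reg_checks)
		verify_registers_after_idle_exit(dc);	/* illustrative helper */

Since this patch only declares the member, it remains false by default until a caller explicitly sets it, matching the "leave disabled by default" intent described above.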
Reviewed-by: Gabe Teeger Acked-by: Wayne Lin Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e17ddda8ec38..8abd19e7f826 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -991,6 +991,7 @@ struct dc_debug_options { bool disable_dmub_reallow_idle; bool disable_timeout; bool disable_extblankadj; + bool enable_idle_reg_checks; unsigned int static_screen_wait_frames; bool force_chroma_subsampling_1tap; }; -- cgit v1.2.3-70-g09d2 From 9dc57c2adf2c307a672f15b4be17c6c14e37cfb9 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 13 Mar 2024 12:50:43 +0800 Subject: drm/amdgpu: add ras event id support add amdgpu ras event id support to better distinguish different error information sources in dmesg logs. the following log will be identify by event id: {event_id} interrupt to inform RAS event {event_id} ACA logs {event_id} errors statistic since from current injection/error query {event_id} errors statistic since from gpu load Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 32 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 207 ++++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 30 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 1 + drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 10 +- 6 files changed, 195 insertions(+), 88 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index 24ad4b97177b..0734490347db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -210,22 +210,26 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) return -EOPNOTSUPP; } -static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry) +static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry, + struct ras_query_context *qctx) { - dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n"); - dev_info(adev->dev, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_STATUS]); - dev_info(adev->dev, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_ADDR]); - dev_info(adev->dev, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_MISC0]); - dev_info(adev->dev, HW_ERR "aca entry[%02d].IPID=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_IPID]); - dev_info(adev->dev, HW_ERR "aca entry[%02d].SYND=0x%016llx\n", - idx, entry->regs[MCA_REG_IDX_SYND]); + u64 event_id = qctx->event_id; + + RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n"); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_STATUS]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_ADDR]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_MISC0]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_IPID]); + RAS_EVENT_LOG(adev, 
event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n", + idx, entry->regs[MCA_REG_IDX_SYND]); } -int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data) +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct ras_err_data *err_data, struct ras_query_context *qctx) { struct amdgpu_smuio_mcm_config_info mcm_info; struct ras_err_addr err_addr = {0}; @@ -244,7 +248,7 @@ int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_blo list_for_each_entry(node, &mca_set.list, node) { entry = &node->entry; - amdgpu_mca_smu_mca_bank_dump(adev, i++, entry); + amdgpu_mca_smu_mca_bank_dump(adev, i++, entry, qctx); count = 0; ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index b964110ed1e0..e5bf07ce3451 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -169,6 +169,7 @@ void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set); int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry); void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set); -int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data); +int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, + struct ras_err_data *err_data, struct ras_query_context *qctx); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 8ebab6f22e5a..26662c76b293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1045,6 +1045,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, struct ras_manager *ras_mgr, struct ras_err_data *err_data, + struct ras_query_context *qctx, const char *blk_name, bool is_ue, bool is_de) @@ -1052,27 +1053,28 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info; struct ras_err_node *err_node; struct ras_err_info *err_info; + u64 event_id = qctx->event_id; if (is_ue) { for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; if (err_info->ue_count) { - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new uncorrectable hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->ue_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld new uncorrectable hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ue_count, + blk_name); } } for_each_ras_error(err_node, &ras_mgr->err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld uncorrectable hardware errors detected in total in %s block\n", - mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld uncorrectable hardware errors detected in total in %s block\n", + 
mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name); } } else { @@ -1081,44 +1083,44 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; if (err_info->de_count) { - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new deferred hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->de_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld new deferred hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->de_count, + blk_name); } } for_each_ras_error(err_node, &ras_mgr->err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld deferred hardware errors detected in total in %s block\n", - mcm_info->socket_id, mcm_info->die_id, - err_info->de_count, blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld deferred hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, + err_info->de_count, blk_name); } } else { for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; if (err_info->ce_count) { - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld new correctable hardware errors detected in %s block\n", - mcm_info->socket_id, - mcm_info->die_id, - err_info->ce_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld new correctable hardware errors detected in %s block\n", + mcm_info->socket_id, + mcm_info->die_id, + err_info->ce_count, + blk_name); } } for_each_ras_error(err_node, &ras_mgr->err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; - dev_info(adev->dev, "socket: %d, die: %d, " - "%lld correctable hardware errors detected in total in %s block\n", - mcm_info->socket_id, mcm_info->die_id, - err_info->ce_count, blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, " + "%lld correctable hardware errors detected in total in %s block\n", + mcm_info->socket_id, mcm_info->die_id, + err_info->ce_count, blk_name); } } } @@ -1131,77 +1133,79 @@ static inline bool err_data_has_source_info(struct ras_err_data *data) static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, struct ras_query_if *query_if, - struct ras_err_data *err_data) + struct ras_err_data *err_data, + struct ras_query_context *qctx) { struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head); const char *blk_name = get_ras_block_str(&query_if->head); + u64 event_id = qctx->event_id; if (err_data->ce_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, blk_name, false, false); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { - dev_info(adev->dev, "socket: %d, die: %d " - "%ld correctable hardware errors " - "detected in %s block\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - ras_mgr->err_data.ce_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " + "%ld correctable hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.ce_count, + blk_name); } else { - dev_info(adev->dev, "%ld correctable hardware errors " - 
"detected in %s block\n", - ras_mgr->err_data.ce_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors " + "detected in %s block\n", + ras_mgr->err_data.ce_count, + blk_name); } } if (err_data->ue_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, blk_name, true, false); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { - dev_info(adev->dev, "socket: %d, die: %d " - "%ld uncorrectable hardware errors " - "detected in %s block\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - ras_mgr->err_data.ue_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " + "%ld uncorrectable hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.ue_count, + blk_name); } else { - dev_info(adev->dev, "%ld uncorrectable hardware errors " - "detected in %s block\n", - ras_mgr->err_data.ue_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors " + "detected in %s block\n", + ras_mgr->err_data.ue_count, + blk_name); } } if (err_data->de_count) { if (err_data_has_source_info(err_data)) { - amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, + amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx, blk_name, false, true); } else if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id && adev->smuio.funcs->get_die_id) { - dev_info(adev->dev, "socket: %d, die: %d " - "%ld deferred hardware errors " - "detected in %s block\n", - adev->smuio.funcs->get_socket_id(adev), - adev->smuio.funcs->get_die_id(adev), - ras_mgr->err_data.de_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d " + "%ld deferred hardware errors " + "detected in %s block\n", + adev->smuio.funcs->get_socket_id(adev), + adev->smuio.funcs->get_die_id(adev), + ras_mgr->err_data.de_count, + blk_name); } else { - dev_info(adev->dev, "%ld deferred hardware errors " - "detected in %s block\n", - ras_mgr->err_data.de_count, - blk_name); + RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors " + "detected in %s block\n", + ras_mgr->err_data.de_count, + blk_name); } } } @@ -1294,6 +1298,7 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, struct ras_query_if *info, struct ras_err_data *err_data, + struct ras_query_context *qctx, unsigned int error_query_mode) { enum amdgpu_ras_block blk = info ? 
info->head.block : AMDGPU_RAS_BLOCK_COUNT; @@ -1338,8 +1343,8 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, return ret; } else { /* FIXME: add code to check return value later */ - amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data); - amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data); + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx); + amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx); } } @@ -1351,6 +1356,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data; + struct ras_query_context qctx; unsigned int error_query_mode; int ret; @@ -1364,8 +1370,12 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) return -EINVAL; + memset(&qctx, 0, sizeof(qctx)); + qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ? + RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID); ret = amdgpu_ras_query_error_status_helper(adev, info, &err_data, + &qctx, error_query_mode); if (ret) goto out_fini_err_data; @@ -1376,7 +1386,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_i info->ce_count = obj->err_data.ce_count; info->de_count = obj->err_data.de_count; - amdgpu_ras_error_generate_report(adev, info, &err_data); + amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx); out_fini_err_data: amdgpu_ras_error_data_fini(&err_data); @@ -3036,6 +3046,35 @@ static int amdgpu_get_ras_schema(struct amdgpu_device *adev) AMDGPU_RAS_ERROR__PARITY; } +static void ras_event_mgr_init(struct ras_event_manager *mgr) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(mgr->seqnos); i++) + atomic64_set(&mgr->seqnos[i], 0); +} + +static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_hive_info *hive; + + if (!ras) + return; + + hive = amdgpu_get_xgmi_hive(adev); + ras->event_mgr = hive ? 
&hive->event_mgr : &ras->__event_mgr; + + /* init event manager with node 0 on xgmi system */ + if (!amdgpu_in_reset(adev)) { + if (!hive || adev->gmc.xgmi.node_id == 0) + ras_event_mgr_init(ras->event_mgr); + } + + if (hive) + amdgpu_put_xgmi_hive(hive); +} + int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -3356,6 +3395,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + amdgpu_ras_event_mgr_init(adev); + if (amdgpu_aca_is_enabled(adev)) { if (amdgpu_in_reset(adev)) r = amdgpu_aca_reset(adev); @@ -3472,13 +3513,37 @@ void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status) atomic_set(&ras->fed, !!status); } +bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id) +{ + return !(id & BIT_ULL(63)); +} + +u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + u64 id; + + switch (type) { + case RAS_EVENT_TYPE_ISR: + id = (u64)atomic64_read(&ras->event_mgr->seqnos[type]); + break; + case RAS_EVENT_TYPE_INVALID: + default: + id = BIT_ULL(63) | 0ULL; + break; + } + + return id; +} + void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) { if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + u64 event_id = (u64)atomic64_inc_return(&ras->event_mgr->seqnos[RAS_EVENT_TYPE_ISR]); - dev_info(adev->dev, "uncorrectable hardware error" - "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); + RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" + "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; amdgpu_ras_reset_gpu(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index e0f8ce9d8440..8d26989c75c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -64,6 +64,14 @@ struct amdgpu_iv_entry; /* The high three bits indicates socketid */ #define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK) +#define RAS_EVENT_LOG(_adev, _id, _fmt, ...) \ +do { \ + if (amdgpu_ras_event_id_is_valid((_adev), (_id))) \ + dev_info((_adev)->dev, "{%llu}" _fmt, (_id), ##__VA_ARGS__); \ + else \ + dev_info((_adev)->dev, _fmt, ##__VA_ARGS__); \ +} while (0) + enum amdgpu_ras_block { AMDGPU_RAS_BLOCK__UMC = 0, AMDGPU_RAS_BLOCK__SDMA, @@ -419,6 +427,21 @@ struct umc_ecc_info { int record_ce_addr_supported; }; +enum ras_event_type { + RAS_EVENT_TYPE_INVALID = -1, + RAS_EVENT_TYPE_ISR = 0, + RAS_EVENT_TYPE_COUNT, +}; + +struct ras_event_manager { + atomic64_t seqnos[RAS_EVENT_TYPE_COUNT]; +}; + +struct ras_query_context { + enum ras_event_type type; + u64 event_id; +}; + struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. 
*/ @@ -479,6 +502,11 @@ struct amdgpu_ras { atomic_t page_retirement_req_cnt; /* Fatal error detected flag */ atomic_t fed; + + /* RAS event manager */ + struct ras_event_manager __event_mgr; + struct ras_event_manager *event_mgr; + }; struct ras_fs_data { @@ -879,4 +907,6 @@ void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status); bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev); +bool amdgpu_ras_event_id_is_valid(struct amdgpu_device *adev, u64 id); +u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index 1592c63b3099..a3bfc16de6d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -44,6 +44,7 @@ struct amdgpu_hive_info { struct amdgpu_reset_domain *reset_domain; atomic_t ras_recovery; + struct ras_event_manager event_mgr; }; struct amdgpu_pcs_ras_field { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 77af4e25ff46..4a02e1f041da 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -404,10 +404,16 @@ static int umc_v12_0_err_cnt_init_per_channel(struct amdgpu_device *adev, static void umc_v12_0_ecc_info_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { + struct ras_query_context qctx; + + memset(&qctx, 0, sizeof(qctx)); + qctx.event_id = amdgpu_ras_acquire_event_id(adev, amdgpu_ras_intr_triggered() ? + RAS_EVENT_TYPE_ISR : RAS_EVENT_TYPE_INVALID); + amdgpu_mca_smu_log_ras_error(adev, - AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status); + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_CE, ras_error_status, &qctx); amdgpu_mca_smu_log_ras_error(adev, - AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status); + AMDGPU_RAS_BLOCK__UMC, AMDGPU_MCA_ERROR_TYPE_UE, ras_error_status, &qctx); } static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *adev, -- cgit v1.2.3-70-g09d2 From 765bea0d73b1de24d3c32d35a75b0c05c498a3fb Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 27 Feb 2024 10:30:10 +0800 Subject: drm/amdgpu: Apply retry to IP discovery v2 and v4 To ensure GPU driver touch the local framebuffer until it is initialized by integrated firmware. Signed-off-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index a07e4b87d4ca..8d9d6b2e68e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -245,12 +245,16 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, return -ENOENT; } +#define IP_DISCOVERY_V2 2 +#define IP_DISCOVERY_V4 4 + static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, uint8_t *binary) { uint64_t vram_size; u32 msg; int i, ret = 0; + int ip_discovery_ver = 0; /* It can take up to a second for IFWI init to complete on some dGPUs, * but generally it should be in the 60-100ms range. Normally this starts @@ -259,7 +263,11 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, * wait for this to complete. 
Once the C2PMSG is updated, we can * continue. */ - if (dev_is_removable(&adev->pdev->dev)) { + + ip_discovery_ver = RREG32(mmIP_DISCOVERY_VERSION); + if ((dev_is_removable(&adev->pdev->dev)) || + (ip_discovery_ver == IP_DISCOVERY_V2) || + (ip_discovery_ver == IP_DISCOVERY_V4)) { for (i = 0; i < 1000; i++) { msg = RREG32(mmMP0_SMN_C2PMSG_33); if (msg & 0x80000000) -- cgit v1.2.3-70-g09d2 From 3cfaadbe0fcbb5b6a4a46e4c4ad70f5f5091edf3 Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Wed, 13 Mar 2024 15:27:57 +0800 Subject: drm/amdgpu: add support for atom fw version v3_5 Support for atom_firmware_info_v3_5. Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 6857c586ded7..a6d64bdbbb14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -34,6 +34,7 @@ union firmware_info { struct atom_firmware_info_v3_2 v32; struct atom_firmware_info_v3_3 v33; struct atom_firmware_info_v3_4 v34; + struct atom_firmware_info_v3_5 v35; }; /* @@ -872,6 +873,10 @@ int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev) fw_reserved_fb_size = (firmware_info->v34.fw_reserved_size_in_kb << 10); break; + case 5: + fw_reserved_fb_size = + (firmware_info->v35.fw_reserved_size_in_kb << 10); + break; default: fw_reserved_fb_size = 0; break; -- cgit v1.2.3-70-g09d2 From b72a7e0fd0f8d235f885f84642e5c71f4e058c4b Mon Sep 17 00:00:00 2001 From: Nicholas Susanto Date: Wed, 28 Feb 2024 14:14:17 -0500 Subject: drm/amd/display: Enabling urgent latency adjustment for DCN35 [Why] Underflow occurs when running netflix in a 4k144 eDP + 4k60 setup. Increasing DCFCLK or urgent latency watermark fixes the issue. Implementing this workaround for now while we figure out why this is happenning in DCN. [How] Enable urgent latency adjustment and match the reference to existing ASIC that also see increased latency at low FCLK. 
Reviewed-by: Nicholas Kazlauskas Acked-by: Wayne Lin Signed-off-by: Nicholas Susanto Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 80bebfc268db..4d7bcda8ef72 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .dcn_downspread_percent = 0.5, .gpuvm_min_page_size_bytes = 4096, .hostvm_min_page_size_bytes = 4096, - .do_urgent_latency_adjustment = 0, + .do_urgent_latency_adjustment = 1, .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, }; void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) -- cgit v1.2.3-70-g09d2 From 6fe4dab331a706fe4f08736621f88625c6060df0 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Mon, 18 Mar 2024 11:26:17 +0530 Subject: drm/amdgpu: remove the adev check for NULL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit adev is a global data structure and isn't expected to be NULL and hence removing the redundant adev check from the devcoredump code. Cc: Dan Carpenter Signed-off-by: Sunil Khatri Suggested-by: Dan Carpenter Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 57 ++++++++++++++----------------- 1 file changed, 25 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 1434e9a5506b..3398f2a368d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -211,7 +211,8 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, struct drm_printer p; struct amdgpu_coredump_info *coredump = data; struct drm_print_iterator iter; - int i; + struct amdgpu_vm_fault_info *fault_info; + int i, ver; iter.data = buffer; iter.offset = 0; @@ -233,26 +234,22 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, coredump->reset_task_info.pid); /* GPU IP's information of the SOC */ - if (coredump->adev) { - drm_printf(&p, "\nIP Information\n"); - drm_printf(&p, "SOC Family: %d\n", coredump->adev->family); - drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id); - drm_printf(&p, "SOC External Revision id: %d\n", - coredump->adev->external_rev_id); - - for (int i = 1; i < MAX_HWIP; i++) { - for (int j = 0; j < HWIP_MAX_INSTANCE; j++) { - int ver = coredump->adev->ip_versions[i][j]; - - if (ver) - drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n", - hw_ip_names[i], i, j, - IP_VERSION_MAJ(ver), - IP_VERSION_MIN(ver), - IP_VERSION_REV(ver), - IP_VERSION_VARIANT(ver), - IP_VERSION_SUBREV(ver)); - } + drm_printf(&p, "\nIP Information\n"); + drm_printf(&p, "SOC Family: %d\n", coredump->adev->family); + drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id); + drm_printf(&p, "SOC External Revision id: %d\n", coredump->adev->external_rev_id); + + for (int i = 1; i < MAX_HWIP; i++) { + for (int j = 0; j < HWIP_MAX_INSTANCE; j++) { + ver = coredump->adev->ip_versions[i][j]; + if (ver) + drm_printf(&p, "HWIP: %s[%d][%d]: 
v%d.%d.%d.%d.%d\n", + hw_ip_names[i], i, j, + IP_VERSION_MAJ(ver), + IP_VERSION_MIN(ver), + IP_VERSION_REV(ver), + IP_VERSION_VARIANT(ver), + IP_VERSION_SUBREV(ver)); } } @@ -263,18 +260,14 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, coredump->ring->name); } - if (coredump->adev) { - struct amdgpu_vm_fault_info *fault_info = - &coredump->adev->vm_manager.fault_info; - - drm_printf(&p, "\n[%s] Page fault observed\n", - fault_info->vmhub ? "mmhub" : "gfxhub"); - drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", - fault_info->addr); - drm_printf(&p, "Protection fault status register: 0x%x\n\n", - fault_info->status); - } + /* Add page fault information */ + fault_info = &coredump->adev->vm_manager.fault_info; + drm_printf(&p, "\n[%s] Page fault observed\n", + fault_info->vmhub ? "mmhub" : "gfxhub"); + drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", fault_info->addr); + drm_printf(&p, "Protection fault status register: 0x%x\n\n", fault_info->status); + /* Add ring buffer information */ drm_printf(&p, "Ring buffer information\n"); for (int i = 0; i < coredump->adev->num_rings; i++) { int j = 0; -- cgit v1.2.3-70-g09d2 From 0fa9fbbc8a5b78e79acb129d11f42f0c8e5d8f65 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sat, 16 Mar 2024 22:08:46 +0530 Subject: drm/amdgpu: Fix truncation issues in smu_v13_0_init_microcode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu13/smu_v13_0.c: In function ‘smu_v13_0_init_microcode’: drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu13/smu_v13_0.c:108:52: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 108 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^~ ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu13/smu_v13_0.c:108:9: note: ‘snprintf’ output between 12 and 41 bytes into a destination of size 30 108 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cc: Alex Deucher Cc: Christian König Signed-off-by: Srinivasan Shanmugam Suggested-by: Lijo Lazar Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 48170bb5112e..ce16f2a08a47 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -93,7 +93,7 @@ int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; char fw_name[30]; - char ucode_prefix[30]; + char ucode_prefix[15]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; -- cgit v1.2.3-70-g09d2 From db6200a50a5a3900bb2bca992e02922d98ad5742 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Mon, 18 Mar 2024 18:18:18 +0530 Subject: drm/amd/display: Address kdoc for commit_minimal_transition_state_in_dc_update() Adds descriptions for 'new_context', 'srf_updates', and 'surface_count', and removes the excess description for 'context'. 
Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:4411: warning: Function parameter or member 'new_context' not described in 'commit_minimal_transition_state_in_dc_update' drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:4411: warning: Function parameter or member 'srf_updates' not described in 'commit_minimal_transition_state_in_dc_update' drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:4411: warning: Function parameter or member 'surface_count' not described in 'commit_minimal_transition_state_in_dc_update' drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:4411: warning: Excess function parameter 'context' description in 'commit_minimal_transition_state_in_dc_update' Cc: Wenjing Liu Cc: Alex Hung Cc: Rodrigo Siqueira Cc: Roman Li Cc: Aurabindo Pillai Cc: Tom Chung Signed-off-by: Srinivasan Shanmugam Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0757207e7179..2c7c3a788ab3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4387,8 +4387,10 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc, * on current or new context * * @dc: DC structure, used to get the current state - * @context: New context + * @new_context: New context * @stream: Stream getting the update for the flip + * @srf_updates: Surface updates + * @surface_count: Number of surfaces * * The function takes in current state and new state and determine a minimal * transition state as the intermediate step which could make the transition -- cgit v1.2.3-70-g09d2 From a43dbeaba81eb645a12a004c67722c632ed0d94b Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Sat, 24 Feb 2024 09:23:53 +0530 Subject: drm/amd/display: Remove redundant condition in dcn35_calc_blocks_to_gate() pipe_ctx->plane_res.mpcc_inst is of a type that can only hold values between 0 and 255, so it's always greater than or equal to 0. Thus the condition 'pipe_ctx->plane_res.mpcc_inst >= 0' was always true and has been removed. 
Fixes the below: drivers/gpu/drm/amd/amdgpu/../display/dc/hwss/dcn35/dcn35_hwseq.c:1023 dcn35_calc_blocks_to_gate() warn: always true condition '(pipe_ctx->plane_res.mpcc_inst >= 0) => (0-255 >= 0)' Fixes: 6f8b7565cca4 ("drm/amd/display: Add DCN35 HWSEQ") Cc: Qingqing Zhuo Cc: Harry Wentland Cc: Rodrigo Siqueira Cc: Roman Li Cc: Aurabindo Pillai Cc: Tom Chung Signed-off-by: Srinivasan Shanmugam Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 2e8ec58a16eb..56b0b6edbee0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1019,8 +1019,7 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->plane_res.dpp) update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->plane_res.hubp->inst] = false; - if ((pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) && - pipe_ctx->plane_res.mpcc_inst >= 0) + if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false; if (pipe_ctx->stream_res.dsc) -- cgit v1.2.3-70-g09d2 From abc3b5d21d34d21914b8e3caa75690f72baa2f36 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 31 Jan 2024 17:48:34 +0800 Subject: drm/amdgpu: add new aca_smu_type support Add new types to distinguish between ACA error type and smu mca type. e.g.: the ACA_ERROR_TYPE_DEFERRED is not matched any smu mca valid bank channel, so add new type 'aca_smu_type' to distinguish aca error type and smu mca type. 
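A condensed sketch of the resulting translation, mirroring the switch this patch open-codes in __aca_get_error_data(); the standalone helper name is illustrative, and deferred errors deliberately have no SMU bank channel to map to:

	static int aca_error_type_to_smu_type(enum aca_error_type type,
					      enum aca_smu_type *smu_type)
	{
		switch (type) {
		case ACA_ERROR_TYPE_UE:
			*smu_type = ACA_SMU_TYPE_UE;
			break;
		case ACA_ERROR_TYPE_CE:
			*smu_type = ACA_SMU_TYPE_CE;
			break;
		default:
			return -EINVAL;	/* e.g. ACA_ERROR_TYPE_DEFERRED: no SMU bank channel */
		}

		return 0;
	}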
Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 69 ++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 15 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 18 ++++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 37 ++++++------ drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 33 ++++++----- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 29 +++++---- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 10 ++-- .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 26 ++++---- 8 files changed, 141 insertions(+), 96 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 493982f94649..2ad2d874f42f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -28,7 +28,7 @@ #define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype} -typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, void *data); +typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data); struct aca_banks { int nr_banks; @@ -86,7 +86,7 @@ static void aca_banks_release(struct aca_banks *banks) } } -static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_error_type type, u32 *count) +static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count) { struct amdgpu_aca *aca = &adev->aca; const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; @@ -127,7 +127,7 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); } -static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_error_type type, +static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type, int start, int count, struct aca_banks *banks) { @@ -143,13 +143,12 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro return -EOPNOTSUPP; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: max_count = smu_funcs->max_ue_bank_count; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: max_count = smu_funcs->max_ce_bank_count; break; - case ACA_ERROR_TYPE_DEFERRED: default: return -EINVAL; } @@ -164,6 +163,8 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_erro if (ret) return ret; + bank.type = type; + aca_smu_bank_dump(adev, i, count, &bank); ret = aca_banks_add_bank(banks, &bank); @@ -195,7 +196,7 @@ static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type t return hwip->hwid == hwid && hwip->mcatype == mcatype; } -static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type) +static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type) { const struct aca_bank_ops *bank_ops = handle->bank_ops; @@ -297,7 +298,7 @@ static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type, } static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, struct aca_bank_report *report) + enum aca_smu_type type, struct aca_bank_report *report) { const struct aca_bank_ops *bank_ops = handle->bank_ops; @@ -313,12 +314,24 @@ static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank * } static int 
handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type smu_type, void *data) { struct aca_bank_report report; + enum aca_error_type type; int ret; - ret = aca_generate_bank_report(handle, bank, type, &report); + switch (smu_type) { + case ACA_SMU_TYPE_UE: + type = ACA_ERROR_TYPE_UE; + break; + case ACA_SMU_TYPE_CE: + type = ACA_ERROR_TYPE_CE; + break; + default: + return -EINVAL; + } + + ret = aca_generate_bank_report(handle, bank, smu_type, &report); if (ret) return ret; @@ -333,7 +346,7 @@ static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank } static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank, - enum aca_error_type type, bank_handler_t handler, void *data) + enum aca_smu_type type, bank_handler_t handler, void *data) { struct aca_handle *handle; int ret; @@ -354,7 +367,7 @@ static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *ba } static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks, - enum aca_error_type type, bank_handler_t handler, void *data) + enum aca_smu_type type, bank_handler_t handler, void *data) { struct aca_bank_node *node; struct aca_bank *bank; @@ -378,7 +391,7 @@ static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks * return 0; } -static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type, +static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type, bank_handler_t handler, void *data) { struct amdgpu_aca *aca = &adev->aca; @@ -389,10 +402,6 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_error_type type if (list_empty(&aca->mgr.list)) return 0; - /* NOTE: pmfw is only support UE and CE */ - if (type == ACA_ERROR_TYPE_DEFERRED) - type = ACA_ERROR_TYPE_CE; - ret = aca_smu_get_valid_aca_count(adev, type, &count); if (ret) return ret; @@ -479,10 +488,22 @@ out_unlock: static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data) { + enum aca_smu_type smu_type; int ret; + switch (type) { + case ACA_ERROR_TYPE_UE: + smu_type = ACA_SMU_TYPE_UE; + break; + case ACA_ERROR_TYPE_CE: + smu_type = ACA_SMU_TYPE_CE; + break; + default: + return -EINVAL; + } + /* udpate aca bank to aca source error_cache first */ - ret = aca_banks_update(adev, type, handler_aca_log_bank_error, NULL); + ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, NULL); if (ret) return ret; @@ -784,7 +805,7 @@ static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val) return 0; } -static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_error_type type, int idx) +static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_smu_type type, int idx) { struct aca_bank_info info; int i, ret; @@ -793,7 +814,7 @@ static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_e if (ret) return; - seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_ERROR_TYPE_UE ? "UE" : "CE"); + seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_SMU_TYPE_UE ? 
"UE" : "CE"); seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n", idx, info.socket_id, info.die_id, info.hwid, info.mcatype); @@ -807,7 +828,7 @@ struct aca_dump_context { }; static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { struct aca_dump_context *ctx = (struct aca_dump_context *)data; @@ -816,7 +837,7 @@ static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *ban return handler_aca_log_bank_error(handle, bank, type, NULL); } -static int aca_dump_show(struct seq_file *m, enum aca_error_type type) +static int aca_dump_show(struct seq_file *m, enum aca_smu_type type) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; struct aca_dump_context context = { @@ -829,7 +850,7 @@ static int aca_dump_show(struct seq_file *m, enum aca_error_type type) static int aca_dump_ce_show(struct seq_file *m, void *unused) { - return aca_dump_show(m, ACA_ERROR_TYPE_CE); + return aca_dump_show(m, ACA_SMU_TYPE_CE); } static int aca_dump_ce_open(struct inode *inode, struct file *file) @@ -847,7 +868,7 @@ static const struct file_operations aca_ce_dump_debug_fops = { static int aca_dump_ue_show(struct seq_file *m, void *unused) { - return aca_dump_show(m, ACA_ERROR_TYPE_UE); + return aca_dump_show(m, ACA_SMU_TYPE_UE); } static int aca_dump_ue_open(struct inode *inode, struct file *file) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 2da50e095883..a48f4abc345c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -99,7 +99,14 @@ enum aca_error_type { ACA_ERROR_TYPE_COUNT }; +enum aca_smu_type { + ACA_SMU_TYPE_UE = 0, + ACA_SMU_TYPE_CE, + ACA_SMU_TYPE_COUNT, +}; + struct aca_bank { + enum aca_smu_type type; u64 regs[ACA_MAX_REGS_COUNT]; }; @@ -157,9 +164,9 @@ struct aca_handle { }; struct aca_bank_ops { - int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, + int (*aca_bank_generate_report)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data); - bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, + bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data); }; @@ -167,8 +174,8 @@ struct aca_smu_funcs { int max_ue_bank_count; int max_ce_bank_count; int (*set_debug_mode)(struct amdgpu_device *adev, bool enable); - int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_error_type type, u32 *count); - int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_error_type type, int idx, struct aca_bank *bank); + int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count); + int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank); }; struct amdgpu_aca { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 20d51f6c9bb8..43b1aaa04234 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1035,12 +1035,12 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) return 0; } -static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, +static int 
xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { struct amdgpu_device *adev = handle->adev; const char *error_str; - u64 status; + u64 status, count; int ret, ext_error_code; ret = aca_bank_info_decode(bank, &report->info); @@ -1055,9 +1055,17 @@ static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struc if (error_str) dev_info(adev->dev, "%s detected\n", error_str); - if ((type == ACA_ERROR_TYPE_UE && ext_error_code == 0) || - (type == ACA_ERROR_TYPE_CE && ext_error_code == 6)) - report->count[type] = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]); + count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]); + switch (type) { + case ACA_SMU_TYPE_UE: + report->count[ACA_ERROR_TYPE_UE] = ext_error_code == 0 ? count : 0ULL; + break; + case ACA_SMU_TYPE_CE: + report->count[ACA_ERROR_TYPE_CE] = ext_error_code == 6 ? count : 0ULL; + break; + default: + return -EINVAL; + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index b53c8fd4e8cf..bb5be8c4cf11 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -681,37 +681,40 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { }; static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_error_type type, + struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { - u64 status, misc0; + u64 misc0; u32 instlo; int ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - if ((type == ACA_ERROR_TYPE_UE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || - (type == ACA_ERROR_TYPE_CE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; - ret = aca_bank_info_decode(bank, &report->info); - if (ret) - return ret; + /* NOTE: overwrite info.die_id with xcd id for gfx */ + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; - /* NOTE: overwrite info.die_id with xcd id for gfx */ - instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); - instlo &= GENMASK(31, 1); - report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 
0 : 1; + misc0 = bank->regs[ACA_REG_IDX_MISC0]; - misc0 = bank->regs[ACA_REG_IDX_MISC0]; - report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + switch (type) { + case ACA_SMU_TYPE_UE: + report->count[ACA_ERROR_TYPE_UE] = 1ULL; + break; + case ACA_SMU_TYPE_CE: + report->count[ACA_ERROR_TYPE_CE] = ACA_REG__MISC0__ERRCNT(misc0); + break; + default: + return -EINVAL; } return 0; } static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { u32 instlo; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index c0fc44cdd658..bf83e2c08f04 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -707,24 +707,27 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = { }; static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_error_type type, + struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { - u64 status, misc0; + u64 misc0; int ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - if ((type == ACA_ERROR_TYPE_UE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || - (type == ACA_ERROR_TYPE_CE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { - - ret = aca_bank_info_decode(bank, &report->info); - if (ret) - return ret; - - misc0 = bank->regs[ACA_REG_IDX_MISC0]; - report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + + switch (type) { + case ACA_SMU_TYPE_UE: + report->count[ACA_ERROR_TYPE_UE] = 1ULL; + break; + case ACA_SMU_TYPE_CE: + report->count[ACA_ERROR_TYPE_CE] = ACA_REG__MISC0__ERRCNT(misc0); + break; + default: + return -EINVAL; } return 0; @@ -741,7 +744,7 @@ static int mmhub_v1_8_err_codes[] = { }; static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { u32 instlo; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 103dc9c7325f..9aa5c190950e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2190,24 +2190,27 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = { }; static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_error_type type, + struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { - u64 status, misc0; + u64 misc0; int ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - if ((type == ACA_ERROR_TYPE_UE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_FAULT) || - (type == ACA_ERROR_TYPE_CE && - ACA_REG__STATUS__ERRORCODEEXT(status) == ACA_EXTERROR_CODE_CE)) { + ret = aca_bank_info_decode(bank, &report->info); + if (ret) + return ret; - ret = aca_bank_info_decode(bank, &report->info); - if (ret) - return ret; + misc0 = bank->regs[ACA_REG_IDX_MISC0]; - misc0 = bank->regs[ACA_REG_IDX_MISC0]; - report->count[type] = ACA_REG__MISC0__ERRCNT(misc0); + switch (type) { + case ACA_SMU_TYPE_UE: + report->count[ACA_ERROR_TYPE_UE] = 1ULL; + break; + case ACA_SMU_TYPE_CE: + report->count[ACA_ERROR_TYPE_CE] = ACA_REG__MISC0__ERRCNT(misc0); + break; + default: + return -EINVAL; } return 0; @@ 
-2217,7 +2220,7 @@ static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle, static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 }; static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, - enum aca_error_type type, void *data) + enum aca_smu_type type, void *data) { u32 instlo; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 4a02e1f041da..7b841431344f 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -504,7 +504,7 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = { .query_ras_error_address = umc_v12_0_query_ras_error_address, }; -static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_error_type type, +static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { struct amdgpu_device *adev = handle->adev; @@ -517,14 +517,14 @@ static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct status = bank->regs[ACA_REG_IDX_STATUS]; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: if (umc_v12_0_is_uncorrectable_error(adev, status)) { - report->count[type] = 1; + report->count[ACA_ERROR_TYPE_UE] = 1; } break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: if (umc_v12_0_is_correctable_error(adev, status)) { - report->count[type] = 1; + report->count[ACA_ERROR_TYPE_CE] = 1; } break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 4e178916e845..1258060816dd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2984,7 +2984,7 @@ static int aca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) return smu_v13_0_6_mca_set_debug_mode(smu, enable); } -static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_error_type type, u32 *count) +static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count) { uint32_t msg; int ret; @@ -2993,10 +2993,10 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err return -EINVAL; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_QueryValidMcaCount; break; - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_CE: msg = SMU_MSG_QueryValidMcaCeCount; break; default: @@ -3013,14 +3013,14 @@ static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_err } static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, - enum aca_error_type type, u32 *count) + enum aca_smu_type type, u32 *count) { struct smu_context *smu = adev->powerplay.pp_handle; int ret; switch (type) { - case ACA_ERROR_TYPE_UE: - case ACA_ERROR_TYPE_CE: + case ACA_SMU_TYPE_UE: + case ACA_SMU_TYPE_CE: ret = smu_v13_0_6_get_valid_aca_count(smu, type, count); break; default: @@ -3031,16 +3031,16 @@ static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, return ret; } -static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val) { uint32_t msg, param; switch (type) { - case ACA_ERROR_TYPE_UE: + case ACA_SMU_TYPE_UE: msg = SMU_MSG_McaBankDumpDW; break; - case ACA_ERROR_TYPE_CE: + case 
ACA_SMU_TYPE_CE: msg = SMU_MSG_McaBankCeDumpDW; break; default: @@ -3052,7 +3052,7 @@ static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_t return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val); } -static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_type type, +static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type, int idx, int offset, u32 *val, int count) { int ret, i; @@ -3069,7 +3069,7 @@ static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_error_typ return 0; } -static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type type, +static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_smu_type type, int idx, int reg_idx, u64 *val) { struct smu_context *smu = adev->powerplay.pp_handle; @@ -3086,13 +3086,13 @@ static int aca_bank_read_reg(struct amdgpu_device *adev, enum aca_error_type typ *val = (u64)data[1] << 32 | data[0]; dev_dbg(adev->dev, "mca read bank reg: type:%s, index: %d, reg_idx: %d, val: 0x%016llx\n", - type == ACA_ERROR_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); + type == ACA_SMU_TYPE_UE ? "UE" : "CE", idx, reg_idx, *val); return 0; } static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, - enum aca_error_type type, int idx, struct aca_bank *bank) + enum aca_smu_type type, int idx, struct aca_bank *bank) { int i, ret, count; -- cgit v1.2.3-70-g09d2 From 949899cbacf54f1611a7c343093069462bbb6625 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 6 Feb 2024 13:24:16 +0800 Subject: drm/amdgpu: add new api to save error count into aca cache add new api to save error count into aca cache. Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 33 +++++++-------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 4 +++- 2 files changed, 10 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 2ad2d874f42f..7cbff74c0f2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -274,25 +274,25 @@ static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_ return new_bank_error(aerr, info); } -static int aca_log_errors(struct aca_handle *handle, enum aca_error_type type, - struct aca_bank_report *report) +int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info, + enum aca_error_type type, u64 count) { struct aca_error_cache *error_cache = &handle->error_cache; struct aca_bank_error *bank_error; struct aca_error *aerr; - if (!handle || !report) + if (!handle || !info || type >= ACA_ERROR_TYPE_COUNT) return -EINVAL; - if (!report->count[type]) + if (!count) return 0; aerr = &error_cache->errors[type]; - bank_error = get_bank_error(aerr, &report->info); + bank_error = get_bank_error(aerr, info); if (!bank_error) return -ENOMEM; - bank_error->count[type] += report->count[type]; + bank_error->count += count; return 0; } @@ -317,31 +317,12 @@ static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank enum aca_smu_type smu_type, void *data) { struct aca_bank_report report; - enum aca_error_type type; int ret; - switch (smu_type) { - case ACA_SMU_TYPE_UE: - type = ACA_ERROR_TYPE_UE; - break; - case ACA_SMU_TYPE_CE: - type = ACA_ERROR_TYPE_CE; - break; - default: - return -EINVAL; - } - ret = aca_generate_bank_report(handle, bank, 
smu_type, &report); if (ret) return ret; - if (!report.count[type]) - return 0; - - ret = aca_log_errors(handle, type, &report); - if (ret) - return ret; - return 0; } @@ -440,7 +421,7 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er if (type >= ACA_ERROR_TYPE_COUNT) return -EINVAL; - count = bank_error->count[type]; + count = bank_error->count; if (!count) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index a48f4abc345c..470efefcde10 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -130,7 +130,7 @@ struct aca_bank_report { struct aca_bank_error { struct list_head node; struct aca_bank_info info; - u64 count[ACA_ERROR_TYPE_COUNT]; + u64 count; }; struct aca_error { @@ -206,4 +206,6 @@ int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *han enum aca_error_type type, void *data); int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en); void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root); +int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info, + enum aca_error_type type, u64 count); #endif -- cgit v1.2.3-70-g09d2 From e6136150cd2666a03c71208b0a0521c6996107dc Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 22 Feb 2024 10:10:43 +0800 Subject: drm/amdgpu: refine aca error cache for gfx v9.4.3 refine aca error cache for gfx 9.4.3 Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index bb5be8c4cf11..cc7cccf0b2b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -684,33 +684,36 @@ static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { + struct aca_bank_info info; u64 misc0; u32 instlo; int ret; - ret = aca_bank_info_decode(bank, &report->info); + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; /* NOTE: overwrite info.die_id with xcd id for gfx */ instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); instlo &= GENMASK(31, 1); - report->info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; + info.die_id = instlo == mmSMNAID_XCD0_MCA_SMU ? 0 : 1; misc0 = bank->regs[ACA_REG_IDX_MISC0]; switch (type) { case ACA_SMU_TYPE_UE: - report->count[ACA_ERROR_TYPE_UE] = 1ULL; + ret = aca_error_cache_log_bank_error(handle, &info, + ACA_ERROR_TYPE_UE, 1ULL); break; case ACA_SMU_TYPE_CE: - report->count[ACA_ERROR_TYPE_CE] = ACA_REG__MISC0__ERRCNT(misc0); + ret = aca_error_cache_log_bank_error(handle, &info, + ACA_ERROR_TYPE_CE, ACA_REG__MISC0__ERRCNT(misc0)); break; default: return -EINVAL; } - return 0; + return ret; } static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, -- cgit v1.2.3-70-g09d2 From d8a3f0a0348d02adf975fb0be71938dfb1c2e273 Mon Sep 17 00:00:00 2001 From: Christian Koenig Date: Mon, 18 Mar 2024 11:43:39 +0100 Subject: drm/amdgpu: implement TLB flush fence The problem is that when (for example) 4k pages are replaced with a single 2M page we need to wait for change to be flushed out by invalidating the TLB before the PT can be freed. 
Solve this by moving the TLB flush into a DMA-fence object which can be used to delay the freeing of the PT BOs until it is signaled. V2: (Shashank) - rebase - set dma_fence_error only in case of error - add tlb_flush fence only when PT/PD BO is locked (Felix) - use vm->pasid when f is NULL (Mukul) V4: - add a wait for (f->dependency) in tlb_fence_work (Christian) - move the misplaced fence_create call to the end (Philip) V5: - free the f->dependency properly V6: (Shashank) - light code movement, moved all the clean-up in previous patch - introduce params.needs_flush and its usage in this patch - rebase without TLB HW sequence patch V7: - Keep the vm->last_update_fence and tlb_cb code until we can fix the HW sequencing (Christian) - Move all the tlb_fence related code in a separate function so that its easier to read and review V9: Addressed review comments from Christian - start PT update only when we have callback memory allocated V10: - handle device unlock in OOM case (Christian, Mukul) - added Christian's R-B Cc: Christian Koenig Cc: Felix Kuehling Cc: Rajneesh Bhardwaj Cc: Alex Deucher Acked-by: Felix Kuehling Acked-by: Rajneesh Bhardwaj Tested-by: Rajneesh Bhardwaj Reviewed-by: Shashank Sharma Reviewed-by: Christian Koenig Signed-off-by: Christian Koenig Signed-off-by: Shashank Sharma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 62 ++++++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 4 + drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c | 112 +++++++++++++++++++++++ 7 files changed, 175 insertions(+), 20 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 4536c8ad0e11..f24f11ac3e92 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -70,7 +70,8 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \ atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \ atombios_encoders.o amdgpu_sa.o atombios_i2c.o \ - amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_ib.o amdgpu_pll.o \ + amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_vm_tlb_fence.o \ + amdgpu_ib.o amdgpu_pll.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \ amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \ amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 81fb3465e197..317d2d468a6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -885,6 +885,44 @@ static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence, kfree(tlb_cb); } +/** + * amdgpu_vm_tlb_flush - prepare TLB flush + * + * @params: parameters for update + * @fence: input fence to sync TLB flush with + * @tlb_cb: the callback structure + * + * Increments the tlb sequence to make sure that future CS execute a VM flush. 
+ */ +static void +amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params, + struct dma_fence **fence, + struct amdgpu_vm_tlb_seq_struct *tlb_cb) +{ + struct amdgpu_vm *vm = params->vm; + + if (!fence || !*fence) + return; + + tlb_cb->vm = vm; + if (!dma_fence_add_callback(*fence, &tlb_cb->cb, + amdgpu_vm_tlb_seq_cb)) { + dma_fence_put(vm->last_tlb_flush); + vm->last_tlb_flush = dma_fence_get(*fence); + } else { + amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); + } + + /* Prepare a TLB flush fence to be attached to PTs */ + if (!params->unlocked && vm->is_compute_context) { + amdgpu_vm_tlb_fence_create(params->adev, vm, fence); + + /* Makes sure no PD/PT is freed before the flush */ + dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence, + DMA_RESV_USAGE_BOOKKEEP); + } +} + /** * amdgpu_vm_update_range - update a range in the vm page table * @@ -916,8 +954,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ttm_resource *res, dma_addr_t *pages_addr, struct dma_fence **fence) { - struct amdgpu_vm_update_params params; struct amdgpu_vm_tlb_seq_struct *tlb_cb; + struct amdgpu_vm_update_params params; struct amdgpu_res_cursor cursor; enum amdgpu_sync_mode sync_mode; int r, idx; @@ -927,8 +965,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL); if (!tlb_cb) { - r = -ENOMEM; - goto error_unlock; + drm_dev_exit(idx); + return -ENOMEM; } /* Vega20+XGMI where PTEs get inadvertently cached in L2 texture cache, @@ -948,6 +986,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.immediate = immediate; params.pages_addr = pages_addr; params.unlocked = unlocked; + params.needs_flush = flush_tlb; params.allow_override = allow_override; /* Implicitly sync to command submissions in the same VM before @@ -1031,24 +1070,16 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, } r = vm->update_funcs->commit(¶ms, fence); + if (r) + goto error_free; - if (flush_tlb || params.table_freed) { - tlb_cb->vm = vm; - if (fence && *fence && - !dma_fence_add_callback(*fence, &tlb_cb->cb, - amdgpu_vm_tlb_seq_cb)) { - dma_fence_put(vm->last_tlb_flush); - vm->last_tlb_flush = dma_fence_get(*fence); - } else { - amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb); - } + if (params.needs_flush) { + amdgpu_vm_tlb_flush(¶ms, fence, tlb_cb); tlb_cb = NULL; } error_free: kfree(tlb_cb); - -error_unlock: amdgpu_vm_eviction_unlock(vm); drm_dev_exit(idx); return r; @@ -2391,6 +2422,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, mutex_init(&vm->eviction_lock); vm->evicting = false; + vm->tlb_fence_context = dma_fence_context_alloc(1); r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, false, &root, xcp_id); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 8efa8422f4f7..b0a4fe683352 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -257,9 +257,9 @@ struct amdgpu_vm_update_params { unsigned int num_dw_left; /** - * @table_freed: return true if page table is freed when updating + * @needs_flush: true whenever we need to invalidate the TLB */ - bool table_freed; + bool needs_flush; /** * @allow_override: true for memory that is not uncached: allows MTYPE @@ -342,6 +342,7 @@ struct amdgpu_vm { atomic64_t tlb_seq; struct dma_fence *last_tlb_flush; atomic64_t kfd_last_flushed_seq; + uint64_t tlb_fence_context; /* How many times we had to 
re-generate the page tables */ uint64_t generation; @@ -611,5 +612,8 @@ void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, uint64_t addr, uint32_t status, unsigned int vmhub); +void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, + struct amdgpu_vm *vm, + struct dma_fence **fence); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 6e31621452de..3895bd7d176a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -108,7 +108,9 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p, struct dma_fence **fence) { - /* Flush HDP */ + if (p->needs_flush) + atomic64_inc(&p->vm->tlb_seq); + mb(); amdgpu_device_flush_hdp(p->adev, NULL); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 124389a6bf48..601df0ce8290 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -972,7 +972,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, while (cursor.pfn < frag_start) { /* Make sure previous mapping is freed */ if (cursor.entry->bo) { - params->table_freed = true; + params->needs_flush = true; amdgpu_vm_pt_free_dfs(adev, params->vm, &cursor, params->unlocked); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 349416e176a1..66e8a016126b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -126,6 +126,10 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, WARN_ON(ib->length_dw == 0); amdgpu_ring_pad_ib(ring, ib); + + if (p->needs_flush) + atomic64_inc(&p->vm->tlb_seq); + WARN_ON(ib->length_dw > p->num_dw_left); f = amdgpu_job_submit(p->job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c new file mode 100644 index 000000000000..51cddfa3f1e8 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +#include "amdgpu.h" +#include "amdgpu_vm.h" +#include "amdgpu_gmc.h" + +struct amdgpu_tlb_fence { + struct dma_fence base; + struct amdgpu_device *adev; + struct dma_fence *dependency; + struct work_struct work; + spinlock_t lock; + uint16_t pasid; + +}; + +static const char *amdgpu_tlb_fence_get_driver_name(struct dma_fence *fence) +{ + return "amdgpu tlb fence"; +} + +static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f) +{ + return "amdgpu tlb timeline"; +} + +static void amdgpu_tlb_fence_work(struct work_struct *work) +{ + struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work); + int r; + + if (f->dependency) { + dma_fence_wait(f->dependency, false); + dma_fence_put(f->dependency); + f->dependency = NULL; + } + + r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0); + if (r) { + dev_err(f->adev->dev, "TLB flush failed for PASID %d.\n", + f->pasid); + dma_fence_set_error(&f->base, r); + } + + dma_fence_signal(&f->base); + dma_fence_put(&f->base); +} + +static const struct dma_fence_ops amdgpu_tlb_fence_ops = { + .use_64bit_seqno = true, + .get_driver_name = amdgpu_tlb_fence_get_driver_name, + .get_timeline_name = amdgpu_tlb_fence_get_timeline_name +}; + +void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct dma_fence **fence) +{ + struct amdgpu_tlb_fence *f; + + f = kmalloc(sizeof(*f), GFP_KERNEL); + if (!f) { + /* + * We can't fail since the PDEs and PTEs are already updated, so + * just block for the dependency and execute the TLB flush + */ + if (*fence) + dma_fence_wait(*fence, false); + + amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0); + *fence = dma_fence_get_stub(); + return; + } + + f->adev = adev; + f->dependency = *fence; + f->pasid = vm->pasid; + INIT_WORK(&f->work, amdgpu_tlb_fence_work); + spin_lock_init(&f->lock); + + dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock, + vm->tlb_fence_context, atomic64_read(&vm->tlb_seq)); + + /* TODO: We probably need a separate wq here */ + dma_fence_get(&f->base); + schedule_work(&f->work); + + *fence = &f->base; +} -- cgit v1.2.3-70-g09d2 From 5b53390e4bceb584c50b1c738d666d6b18cad8c4 Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Wed, 28 Feb 2024 11:09:28 -0500 Subject: drm/amd/display: add stream clock source to DP DTO params [WHY&HOW] Stream clock source is a required parameter for DP DTO programming. Reviewed-by: Chris Park Acked-by: Wayne Lin Signed-off-by: Dillon Varone Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index b9a06bf84cc9..722eff84ccfd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -59,6 +59,7 @@ enum dentist_dispclk_change_mode { struct dp_dto_params { int otg_inst; enum signal_type signal; + enum streamclk_source clk_src; uint64_t pixclk_hz; uint64_t refclk_hz; }; -- cgit v1.2.3-70-g09d2 From 22194e71ea8928c3a9a02ba88a21749b88d6dd8c Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Wed, 28 Feb 2024 21:05:21 -0500 Subject: drm/amd/display: Program pixclk according to dcn revision [WHY&HOW] Pixel clock programming should be built per dcn revision, not hardcoded to use dcn20. 
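The hunk below only adds the dispatch point; a DCN revision that needs different pixel clock programming then publishes its own hook through its resource-pool function table, roughly like this (hypothetical dcnXY names, shown only to illustrate the wiring, not part of this patch):

	static void dcnXY_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx)
	{
		/* revision-specific DP DTO / pixel clock programming goes here */
	}

	static const struct resource_funcs dcnXY_res_pool_funcs = {
		/* build_pipe_hw_param() calls this instead of the DCN20 default */
		.build_pipe_pix_clk_params = dcnXY_build_pipe_pix_clk_params,
	};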
Reviewed-by: Chris Park Acked-by: Wayne Lin Signed-off-by: Dillon Varone Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index a2387cea1af9..d521fc65afe3 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -1282,8 +1282,13 @@ void dcn20_build_pipe_pix_clk_params(struct pipe_ctx *pipe_ctx) static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) { + struct resource_pool *pool = pipe_ctx->stream->ctx->dc->res_pool; - dcn20_build_pipe_pix_clk_params(pipe_ctx); + if (pool->funcs->build_pipe_pix_clk_params) { + pool->funcs->build_pipe_pix_clk_params(pipe_ctx); + } else { + dcn20_build_pipe_pix_clk_params(pipe_ctx); + } pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; -- cgit v1.2.3-70-g09d2 From 5275114a7043425b690275566e80d450ddc7525d Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 22 Feb 2024 10:11:07 +0800 Subject: drm/amdgpu: refine aca error cache for mmhub v1.8 refine aca error cache for mmhub v1.8 Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index bf83e2c08f04..c41f600a5e10 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -710,27 +710,29 @@ static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { + struct aca_bank_info info; u64 misc0; int ret; - ret = aca_bank_info_decode(bank, &report->info); + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; misc0 = bank->regs[ACA_REG_IDX_MISC0]; - switch (type) { case ACA_SMU_TYPE_UE: - report->count[ACA_ERROR_TYPE_UE] = 1ULL; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); break; case ACA_SMU_TYPE_CE: - report->count[ACA_ERROR_TYPE_CE] = ACA_REG__MISC0__ERRCNT(misc0); + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, + ACA_REG__MISC0__ERRCNT(misc0)); break; default: return -EINVAL; } - return 0; + return ret; } /* reference to smu driver if header file */ -- cgit v1.2.3-70-g09d2 From 29fd17cee17c98c978ff27da29c825cbaf08b9d5 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Thu, 29 Feb 2024 18:46:04 -0500 Subject: drm/amd/display: clear mpc_tree in init_pipes [Why] During init_pipes, otg master is not initialized. 
So mpc tree is still configured even if mpc bottom is not active [How] For pipes that have tg enabled, check their mpc tree and clear opp_list if mpc bottom is not active Reviewed-by: George Shen Acked-by: Wayne Lin Signed-off-by: Samson Tam Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 16 ++++++++++++++++ drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 314798400b16..e0c3c14d12f3 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -1366,6 +1366,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) struct dce_hwseq *hws = dc->hwseq; struct hubbub *hubbub = dc->res_pool->hubbub; bool can_apply_seamless_boot = false; + bool tg_enabled[MAX_PIPES] = {false}; for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) { @@ -1447,6 +1448,7 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) // requesting data while in PSR. tg->funcs->tg_init(tg); hubp->power_gated = true; + tg_enabled[i] = true; continue; } @@ -1488,6 +1490,20 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + /* Clean up MPC tree */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (tg_enabled[i]) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { + int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id; + + if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id])) + dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; + } + } + } + } + /* Power gate DSCs */ if (hws->funcs.dsc_pg_control != NULL) { uint32_t num_opps = 0; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 56b0b6edbee0..2fb90c2330a8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -720,6 +720,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) struct hubbub *hubbub = dc->res_pool->hubbub; struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; bool can_apply_seamless_boot = false; + bool tg_enabled[MAX_PIPES] = {false}; for (i = 0; i < context->stream_count; i++) { if (context->streams[i]->apply_seamless_boot_optimization) { @@ -801,6 +802,7 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) // requesting data while in PSR. 
tg->funcs->tg_init(tg); hubp->power_gated = true; + tg_enabled[i] = true; continue; } @@ -842,6 +844,20 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + /* Clean up MPC tree */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (tg_enabled[i]) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list) { + if (dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot) { + int bot_id = dc->res_pool->opps[i]->mpc_tree_params.opp_list->mpcc_bot->mpcc_id; + + if ((bot_id < MAX_MPCC) && (bot_id < MAX_PIPES) && (!tg_enabled[bot_id])) + dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL; + } + } + } + } + if (pg_cntl != NULL) { if (pg_cntl->funcs->dsc_pg_control != NULL) { uint32_t num_opps = 0; -- cgit v1.2.3-70-g09d2 From 2a88f1b5d0fa88741ccb0860430e0d06acec5f54 Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Fri, 8 Mar 2024 16:26:52 +0800 Subject: drm/amdgpu: add VCN sensor value for Vangogh This will add VCN sensor value for Vangogh. Signed-off-by: Xiaojian Du Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 2ff6deedef95..29295941aca9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -301,7 +301,7 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_AVERAGE_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / @@ -1507,6 +1507,12 @@ static int vangogh_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = vangogh_common_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = vangogh_common_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, -- cgit v1.2.3-70-g09d2 From 176c3e89567fdc037c318f672bbe3c2271004560 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 11 Mar 2024 16:51:49 +0800 Subject: drm/amdgpu: add utcl2 RAS poison query for mmhub Add it for mmhub v1.8.
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 2 ++ drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 15 +++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 1ca9d4ed8063..95d676ee207f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -63,6 +63,8 @@ struct amdgpu_mmhub_funcs { uint64_t page_table_base); void (*update_power_gating)(struct amdgpu_device *adev, bool enable); + bool (*query_utcl2_poison_status)(struct amdgpu_device *adev, + int hub_inst); }; struct amdgpu_mmhub { diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index c41f600a5e10..50113cadaa0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -559,6 +559,20 @@ static void mmhub_v1_8_get_clockgating(struct amdgpu_device *adev, u64 *flags) } +static bool mmhub_v1_8_query_utcl2_poison_status(struct amdgpu_device *adev, + int hub_inst) +{ + u32 fed, status; + + status = RREG32_SOC15(MMHUB, hub_inst, regVM_L2_PROTECTION_FAULT_STATUS); + fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); + /* reset page fault status */ + WREG32_P(SOC15_REG_OFFSET(MMHUB, hub_inst, + regVM_L2_PROTECTION_FAULT_STATUS), 1, ~1); + + return fed; +} + const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { .get_fb_location = mmhub_v1_8_get_fb_location, .init = mmhub_v1_8_init, @@ -568,6 +582,7 @@ const struct amdgpu_mmhub_funcs mmhub_v1_8_funcs = { .setup_vm_pt_regs = mmhub_v1_8_setup_vm_pt_regs, .set_clockgating = mmhub_v1_8_set_clockgating, .get_clockgating = mmhub_v1_8_get_clockgating, + .query_utcl2_poison_status = mmhub_v1_8_query_utcl2_poison_status, }; static const struct amdgpu_ras_err_status_reg_entry mmhub_v1_8_ce_reg_list[] = { -- cgit v1.2.3-70-g09d2 From d8070c4241087d8c1ad3fc21632bac268dd7578a Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 11 Mar 2024 16:53:53 +0800 Subject: drm/amdgpu: support utcl2 RAS poison query for mmhub Support the query for both gfxhub and mmhub, also replace xcc_id with hub_inst. 
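A condensed view of how the reworked query is meant to be called (consistent with the interrupt-handler changes below; hub_type 0 selects gfxhub, non-zero selects mmhub):

	bool fed;

	/* hub_inst picks the gfxhub/mmhub instance; the helper returns false
	 * when the selected hub does not implement the poison query. */
	fed = amdgpu_amdkfd_ras_query_utcl2_poison_status(adev, hub_inst, hub_type);
	if (fed)
		/* poison was consumed through UTCL2, take the RAS consumption path */
		event_interrupt_poison_consumption(dev, pasid, client_id);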
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 17 ++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 ++--- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 17 +++++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 17 +++++++++++------ 5 files changed, 37 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index d5fde8adf19b..8ee18c2c082a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -770,12 +770,19 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, } bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, - int xcc_id) + int hub_inst, int hub_type) { - if (adev->gfxhub.funcs->query_utcl2_poison_status) - return adev->gfxhub.funcs->query_utcl2_poison_status(adev, xcc_id); - else - return false; + if (!hub_type) { + if (adev->gfxhub.funcs->query_utcl2_poison_status) + return adev->gfxhub.funcs->query_utcl2_poison_status(adev, hub_inst); + else + return false; + } else { + if (adev->mmhub.funcs->query_utcl2_poison_status) + return adev->mmhub.funcs->query_utcl2_poison_status(adev, hub_inst); + else + return false; + } } int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index caee36e52a09..6b67f0025966 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -342,7 +342,7 @@ bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem * void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev, - int xcc_id); + int hub_inst, int hub_type); int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id); void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 05d52b9274a9..470a146f2f43 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -666,10 +666,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW); fed = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, FED); - /* for gfx fed error, kfd will handle it, return directly */ + /* for fed error, kfd will handle it, return directly */ if (fed && amdgpu_ras_is_poison_mode_supported(adev) && - (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2)) && - (vmhub < AMDGPU_MMHUB0_START)) + (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2))) return 0; WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index a8e76287dde0..650da18b0d87 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -369,18 +369,23 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); uint32_t vmid_type = 
SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); - int xcc_id = 0; + int hub_inst = 0; struct kfd_hsa_memory_exception_data exception_data; + /* gfxhub */ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { - xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, + hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, node_id); - if (xcc_id < 0) - xcc_id = 0; + if (hub_inst < 0) + hub_inst = 0; } - if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type && - amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) { + /* mmhub */ + if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) + hub_inst = node_id / 4; + + if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, + hub_inst, vmid_type)) { event_interrupt_poison_consumption(dev, pasid, client_id); return; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index ff7392336795..11641f4645e6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -415,18 +415,23 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); uint32_t node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry); uint32_t vmid_type = SOC15_VMID_TYPE_FROM_IH_ENTRY(ih_ring_entry); - int xcc_id = 0; + int hub_inst = 0; struct kfd_hsa_memory_exception_data exception_data; + /* gfxhub */ if (!vmid_type && dev->adev->gfx.funcs->ih_node_to_logical_xcc) { - xcc_id = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, + hub_inst = dev->adev->gfx.funcs->ih_node_to_logical_xcc(dev->adev, node_id); - if (xcc_id < 0) - xcc_id = 0; + if (hub_inst < 0) + hub_inst = 0; } - if (client_id == SOC15_IH_CLIENTID_UTCL2 && !vmid_type && - amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, xcc_id)) { + /* mmhub */ + if (vmid_type && client_id == SOC15_IH_CLIENTID_VMC) + hub_inst = node_id / 4; + + if (amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev, + hub_inst, vmid_type)) { event_interrupt_poison_consumption_v9(dev, pasid, client_id); return; } -- cgit v1.2.3-70-g09d2 From 62d2aaa7d466ec286ca871e9fc628cedaffb41b1 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 22 Feb 2024 10:11:20 +0800 Subject: drm/amdgpu: refine aca error cache for xgmi v6.4.0 refine aca error cache for xgmi v6.4.0 Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 43b1aaa04234..3e7f0199926e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1039,11 +1039,12 @@ static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struc struct aca_bank_report *report, void *data) { struct amdgpu_device *adev = handle->adev; + struct aca_bank_info info; const char *error_str; u64 status, count; int ret, ext_error_code; - ret = aca_bank_info_decode(bank, &report->info); + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; @@ -1056,18 +1057,21 @@ static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struc dev_info(adev->dev, "%s detected\n", error_str); count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]); + switch (type) { case ACA_SMU_TYPE_UE: - report->count[ACA_ERROR_TYPE_UE] = ext_error_code == 0 ? 
count : 0ULL; + count = ext_error_code == 0 ? count : 0ULL; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count); break; case ACA_SMU_TYPE_CE: - report->count[ACA_ERROR_TYPE_CE] = ext_error_code == 6 ? count : 0ULL; + count = ext_error_code == 6 ? count : 0ULL; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, count); break; default: return -EINVAL; } - return 0; + return ret; } static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = { -- cgit v1.2.3-70-g09d2 From 87428b4054379363e9b3b57128b6c8c2828c0202 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 22 Feb 2024 10:11:31 +0800 Subject: drm/amdgpu: refine aca error cache for sdma v4.4.2 refine aca error cache for sdma v4.4.2 Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 9aa5c190950e..02fc11b98b20 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2193,27 +2193,29 @@ static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, struct aca_bank_report *report, void *data) { + struct aca_bank_info info; u64 misc0; int ret; - ret = aca_bank_info_decode(bank, &report->info); + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; misc0 = bank->regs[ACA_REG_IDX_MISC0]; - switch (type) { case ACA_SMU_TYPE_UE: - report->count[ACA_ERROR_TYPE_UE] = 1ULL; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); break; case ACA_SMU_TYPE_CE: - report->count[ACA_ERROR_TYPE_CE] = ACA_REG__MISC0__ERRCNT(misc0); + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_CE, + ACA_REG__MISC0__ERRCNT(misc0)); break; default: return -EINVAL; } - return 0; + return ret; } /* CODE_SDMA0 - CODE_SDMA4, reference to smu driver if header file */ -- cgit v1.2.3-70-g09d2 From 69bf42fbb2270441674b38f1c29fe5c59afd06b8 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 22 Feb 2024 10:11:46 +0800 Subject: drm/amdgpu: refine aca error cache for umc v12.0 refine aca error cache for umc v12.0 Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 7b841431344f..b512a4b38067 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -508,10 +508,11 @@ static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct struct aca_bank_report *report, void *data) { struct amdgpu_device *adev = handle->adev; + struct aca_bank_info info; u64 status; int ret; - ret = aca_bank_info_decode(bank, &report->info); + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; @@ -519,12 +520,18 @@ static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct switch (type) { case ACA_SMU_TYPE_UE: if (umc_v12_0_is_uncorrectable_error(adev, status)) { - report->count[ACA_ERROR_TYPE_UE] = 1; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); + if (ret) + return ret; } break; case ACA_SMU_TYPE_CE: if 
(umc_v12_0_is_correctable_error(adev, status)) { - report->count[ACA_ERROR_TYPE_CE] = 1; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); + if (ret) + return ret; } break; default: -- cgit v1.2.3-70-g09d2 From c26dce0fd9457fd4a00454fe5a2ce7cf3eca21df Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 2 Mar 2024 21:25:38 -0500 Subject: drm/amd/display: [FW Promotion] Release 0.0.208.0 - Add a Replay residency mode which only calculates the entry time based on replay state 0/1 switch. Acked-by: Wayne Lin Signed-off-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index b81cd2649db3..7b807aea8aa7 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -2957,6 +2957,7 @@ enum pr_residency_mode { PR_RESIDENCY_MODE_ALPM, PR_RESIDENCY_MODE_IPS2, PR_RESIDENCY_MODE_FRAME_CNT, + PR_RESIDENCY_MODE_ENABLEMENT_PERIOD, }; #define REPLAY_RESIDENCY_MODE_SHIFT (0) @@ -2971,6 +2972,7 @@ enum pr_residency_mode { #define REPLAY_RESIDENCY_MODE2_MASK (0xF << REPLAY_RESIDENCY_MODE2_SHIFT) # define REPLAY_RESIDENCY_FIELD_MODE2_IPS (0x1 << REPLAY_RESIDENCY_MODE2_SHIFT) # define REPLAY_RESIDENCY_FIELD_MODE2_FRAME_CNT (0x2 << REPLAY_RESIDENCY_MODE2_SHIFT) +# define REPLAY_RESIDENCY_FIELD_MODE2_EN_PERIOD (0x3 << REPLAY_RESIDENCY_MODE2_SHIFT) #define REPLAY_RESIDENCY_ENABLE_MASK (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT) # define REPLAY_RESIDENCY_DISABLE (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT) -- cgit v1.2.3-70-g09d2 From 689898ca00187ef0c2c943e187d6e56d39c4c678 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 3 Mar 2024 17:34:49 -0500 Subject: drm/amd/display: 3.2.276 This version brings along the following fixes: - Clear mpc_tree in init_pipes - Program pixclk according to dcn revision - Add stream clock source to DP DTO params - Enabling urgent latency adjustment for DCN35 - To adjust dprefclk by down spread percentage - Add debug option for idle reg checks - Revert few patches which cause regression - skip forcing odm in minimal transition - Fix noise issue on HDMI AV mute - Enable fast update for DCN314 - Enable 2to1 ODM policy for DCN35 - Fix DCN31 underflow problem - Add the MALL size in the fallback function - Modify coding style/errors and remove redundant codes - Add missing registers and offset Acked-by: Wayne Lin Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 8abd19e7f826..7b22c2efed77 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.275" +#define DC_VER "3.2.276" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From f26c4e3fc999fd90d65ef5bc517a0323b27c43fb Mon Sep 17 00:00:00 2001 From: Candice Li Date: Mon, 18 Mar 2024 11:23:39 +0800 Subject: drm/amdgpu: Update setting EEPROM table version Use helper function instead of umc callback to set EEPROM table version.
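Since the version choice now lives in one helper keyed on the UMC IP version, supporting another ASIC means adding a case to a single switch rather than wiring up a new umc callback. A condensed sketch of the helper introduced below (the commented-out case is hypothetical, shown only to illustrate the extension point):

	switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
	case IP_VERSION(8, 10, 0):
	case IP_VERSION(12, 0, 0):
	/* case IP_VERSION(x, y, z): a future UMC needing v2.1 goes here */
		hdr->version = RAS_TABLE_VER_V2_1;
		return;
	default:
		hdr->version = RAS_TABLE_VER_V1;
		return;
	}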
Signed-off-by: Candice Li Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 22 +++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 2 -- drivers/gpu/drm/amd/amdgpu/umc_v8_10.c | 6 ------ 3 files changed, 17 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index b12808c0c331..06a62a8a992e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -404,6 +404,22 @@ static int amdgpu_ras_eeprom_correct_header_tag( return res; } +static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + + switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) { + case IP_VERSION(8, 10, 0): + case IP_VERSION(12, 0, 0): + hdr->version = RAS_TABLE_VER_V2_1; + return; + default: + hdr->version = RAS_TABLE_VER_V1; + return; + } +} + /** * amdgpu_ras_eeprom_reset_table -- Reset the RAS EEPROM table * @control: pointer to control structure @@ -423,11 +439,7 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) mutex_lock(&control->ras_tbl_mutex); hdr->header = RAS_TABLE_HDR_VAL; - if (adev->umc.ras && - adev->umc.ras->set_eeprom_table_version) - adev->umc.ras->set_eeprom_table_version(hdr); - else - hdr->version = RAS_TABLE_VER_V1; + amdgpu_ras_set_eeprom_table_version(control); if (hdr->version == RAS_TABLE_VER_V2_1) { hdr->first_rec_offset = RAS_RECORD_START_V2_1; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 26d2ae498daf..5954e839d580 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -66,8 +66,6 @@ struct amdgpu_umc_ras { void *ras_error_status); bool (*check_ecc_err_status)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, void *ras_error_status); - /* support different eeprom table version for different asic */ - void (*set_eeprom_table_version)(struct amdgpu_ras_eeprom_table_header *hdr); }; struct amdgpu_umc_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c index c4c77257710c..a32f87992f20 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c @@ -442,11 +442,6 @@ static void umc_v8_10_ecc_info_query_ras_error_address(struct amdgpu_device *ade umc_v8_10_ecc_info_query_error_address, ras_error_status); } -static void umc_v8_10_set_eeprom_table_version(struct amdgpu_ras_eeprom_table_header *hdr) -{ - hdr->version = RAS_TABLE_VER_V2_1; -} - const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = { .query_ras_error_count = umc_v8_10_query_ras_error_count, .query_ras_error_address = umc_v8_10_query_ras_error_address, @@ -460,5 +455,4 @@ struct amdgpu_umc_ras umc_v8_10_ras = { .query_ras_poison_mode = umc_v8_10_query_ras_poison_mode, .ecc_info_query_ras_error_count = umc_v8_10_ecc_info_query_ras_error_count, .ecc_info_query_ras_error_address = umc_v8_10_ecc_info_query_ras_error_address, - .set_eeprom_table_version = umc_v8_10_set_eeprom_table_version, }; -- cgit v1.2.3-70-g09d2 From e8e0d7907897eb6e1e28603cc93b96e6e8fd19dc Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Fri, 8 Mar 2024 16:31:53 +0800 Subject: drm/amdgpu: add VCN sensor value for yellow carp This will add VCN 
sensor value for yellow carp. Signed-off-by: Xiaojian Du Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 2d1736234b4a..d8bcf765a803 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -363,7 +363,7 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -423,6 +423,12 @@ static int yellow_carp_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = yellow_carp_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = yellow_carp_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, -- cgit v1.2.3-70-g09d2 From b5b6d6251579a29dafdad25f4bc7f3ff7bfd2c86 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 22 Feb 2024 16:52:40 -0500 Subject: drm/amd/display: Workaround register access in idle race with cursor [Why] Cursor update can be pre-empted by a request for setting target flip submission. This causes an issue where we're in the middle of the exit sequence trying to log to DM, but the pre-emption starts another DMCUB command submission that requires being out of idle. The DC lock acquisition can fail, and depending on the DM/OS interface it's possible that the function inserted into this thread must not fail. This means that lock acquisition must be skipped and exit *must* occur. [How] Modify when we consider idle as active. Consider it exited only once the exit has fully finished. Consider it as entered prior to actual notification. Since we're on the same core/thread the cached values are coherent and we'll see that we still need to exit. Once the cursor update resumes it'll continue doing the double exit but this won't cause a functional issue, just a (potential) redundant operation. Reviewed-by: Duncan Ma Acked-by: Wayne Lin Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 765a459d54eb..f796ed061879 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1434,16 +1434,27 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ * Powering up the hardware requires notifying PMFW and DMCUB. * Clearing the driver idle allow requires a DMCUB command. * DMCUB commands requires the DMCUB to be powered up and restored. - * - * Exit out early to prevent an infinite loop of DMCUB commands - * triggering exit low power - use software state to track this.
*/ - dc_dmub_srv->idle_allowed = allow_idle; - if (!allow_idle) + if (!allow_idle) { dc_dmub_srv_exit_low_power_state(dc); - else + /* + * Idle is considered fully exited only after the sequence above + * fully completes. If we have a race of two threads exiting + * at the same time then it's safe to perform the sequence + * twice as long as we're not re-entering. + * + * Infinite command submission is avoided by using the + * dm_execute_dmub_cmd submission instead of the "wake" helpers. + */ + dc_dmub_srv->idle_allowed = false; + } else { + /* Consider idle as notified prior to the actual submission to + * prevent multiple entries. */ + dc_dmub_srv->idle_allowed = true; + dc_dmub_srv_notify_idle(dc, allow_idle); + } } bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, -- cgit v1.2.3-70-g09d2 From e3d4de8d8b24933a9588019bf53891bca520b226 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 22 Feb 2024 14:03:14 +0800 Subject: drm/amdgpu: retire unused aca_bank_report data structure retire unused aca_bank_report data structure. Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 17 +++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 8 +------- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c | 7 +++---- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 7 +++---- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 6 +++--- 7 files changed, 24 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 7cbff74c0f2f..a0b7f0d0c513 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -297,29 +297,26 @@ int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_in return 0; } -static int aca_generate_bank_report(struct aca_handle *handle, struct aca_bank *bank, - enum aca_smu_type type, struct aca_bank_report *report) +static int aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type) { const struct aca_bank_ops *bank_ops = handle->bank_ops; - if (!bank || !report) + if (!bank) return -EINVAL; - if (!bank_ops->aca_bank_generate_report) + if (!bank_ops->aca_bank_parser) return -EOPNOTSUPP; - memset(report, 0, sizeof(*report)); - return bank_ops->aca_bank_generate_report(handle, bank, type, - report, handle->data); + return bank_ops->aca_bank_parser(handle, bank, type, + handle->data); } static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank, - enum aca_smu_type smu_type, void *data) + enum aca_smu_type type, void *data) { - struct aca_bank_report report; int ret; - ret = aca_generate_bank_report(handle, bank, smu_type, &report); + ret = aca_bank_parser(handle, bank, type); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 470efefcde10..6eb4c0341278 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -122,11 +122,6 @@ struct aca_bank_info { int mcatype; }; -struct aca_bank_report { - struct aca_bank_info info; - u64 count[ACA_ERROR_TYPE_COUNT]; -}; - struct aca_bank_error { struct list_head node; struct aca_bank_info info; @@ -164,8 +159,7 @@ struct aca_handle { }; struct aca_bank_ops { - int (*aca_bank_generate_report)(struct aca_handle *handle, 
struct aca_bank *bank, enum aca_smu_type type, - struct aca_bank_report *report, void *data); + int (*aca_bank_parser)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data); bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 3e7f0199926e..523827e8e7a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1035,8 +1035,8 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) return 0; } -static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, - struct aca_bank_report *report, void *data) +static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { struct amdgpu_device *adev = handle->adev; struct aca_bank_info info; @@ -1075,7 +1075,7 @@ static int xgmi_v6_4_0_aca_bank_generate_report(struct aca_handle *handle, struc } static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = { - .aca_bank_generate_report = xgmi_v6_4_0_aca_bank_generate_report, + .aca_bank_parser = xgmi_v6_4_0_aca_bank_parser, }; static const struct aca_info xgmi_v6_4_0_aca_info = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index cc7cccf0b2b2..fc33354f1d3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -680,9 +680,9 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = { .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst, }; -static int gfx_v9_4_3_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_smu_type type, - struct aca_bank_report *report, void *data) +static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle, + struct aca_bank *bank, enum aca_smu_type type, + void *data) { struct aca_bank_info info; u64 misc0; @@ -736,7 +736,7 @@ static bool gfx_v9_4_3_aca_bank_is_valid(struct aca_handle *handle, struct aca_b } static const struct aca_bank_ops gfx_v9_4_3_aca_bank_ops = { - .aca_bank_generate_report = gfx_v9_4_3_aca_bank_generate_report, + .aca_bank_parser = gfx_v9_4_3_aca_bank_parser, .aca_bank_is_valid = gfx_v9_4_3_aca_bank_is_valid, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 50113cadaa0d..7a1ff298417a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -721,9 +721,8 @@ static const struct amdgpu_ras_block_hw_ops mmhub_v1_8_ras_hw_ops = { .reset_ras_error_count = mmhub_v1_8_reset_ras_error_count, }; -static int mmhub_v1_8_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_smu_type type, - struct aca_bank_report *report, void *data) +static int mmhub_v1_8_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { struct aca_bank_info info; u64 misc0; @@ -780,7 +779,7 @@ static bool mmhub_v1_8_aca_bank_is_valid(struct aca_handle *handle, struct aca_b } static const struct aca_bank_ops mmhub_v1_8_aca_bank_ops = { - .aca_bank_generate_report = mmhub_v1_8_aca_bank_generate_report, + .aca_bank_parser = mmhub_v1_8_aca_bank_parser, .aca_bank_is_valid = mmhub_v1_8_aca_bank_is_valid, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 
02fc11b98b20..71c2f50530cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -2189,9 +2189,8 @@ static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = { .reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count, }; -static int sdma_v4_4_2_aca_bank_generate_report(struct aca_handle *handle, - struct aca_bank *bank, enum aca_smu_type type, - struct aca_bank_report *report, void *data) +static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { struct aca_bank_info info; u64 misc0; @@ -2241,7 +2240,7 @@ static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_ } static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = { - .aca_bank_generate_report = sdma_v4_4_2_aca_bank_generate_report, + .aca_bank_parser = sdma_v4_4_2_aca_bank_parser, .aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid, }; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index b512a4b38067..e2e18ccbbc62 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -504,8 +504,8 @@ const struct amdgpu_ras_block_hw_ops umc_v12_0_ras_hw_ops = { .query_ras_error_address = umc_v12_0_query_ras_error_address, }; -static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, - struct aca_bank_report *report, void *data) +static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) { struct amdgpu_device *adev = handle->adev; struct aca_bank_info info; @@ -542,7 +542,7 @@ static int umc_v12_0_aca_bank_generate_report(struct aca_handle *handle, struct } static const struct aca_bank_ops umc_v12_0_aca_bank_ops = { - .aca_bank_generate_report = umc_v12_0_aca_bank_generate_report, + .aca_bank_parser = umc_v12_0_aca_bank_parser, }; const struct aca_info umc_v12_0_aca_info = { -- cgit v1.2.3-70-g09d2 From 2fc46e0b2fe8802c98a68d5c903e0109e1d4a49c Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 12 Mar 2024 11:30:09 +0800 Subject: drm/amdgpu: make reset method configurable for RAS poison Each RAS block has different requirement for gpu reset in poison consumption handling. Add support for mmhub RAS poison consumption handling. v2: remove the mmhub poison support for kfd int v10. 
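In practice the former boolean becomes a mask of AMDGPU_RAS_GPU_RESET_MODE*_RESET flags that is OR'd into con->gpu_reset_flags, so each consumer can ask for no reset (0), a mode-2 reset, or a mode-1 reset. A simplified caller sketch matching the KFD changes below (evict_failed stands in for the kfd_dqm_evict_pasid() result):

	uint32_t reset = 0;

	if (block == AMDGPU_RAS_BLOCK__SDMA)
		reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
	else if (block == AMDGPU_RAS_BLOCK__MMHUB && evict_failed)
		reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;

	/* reset == 0 means page retirement only, no GPU reset is requested */
	amdgpu_amdkfd_ras_poison_consumption_handler(adev, block, reset);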
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 16 +++++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 13 ++++++++----- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 9 +++++---- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 20 +++++++++++++++----- 8 files changed, 41 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 8ee18c2c082a..3b4591f554f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -748,7 +748,7 @@ bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev) } void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset) + enum amdgpu_ras_block block, uint32_t reset) { amdgpu_umc_poison_handler(adev, block, reset); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 6b67f0025966..c51954c9052e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -336,7 +336,7 @@ void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset); + enum amdgpu_ras_block block, uint32_t reset); bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev); bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem); void amdgpu_amdkfd_block_mmu_notifications(void *p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 26662c76b293..3c6d532824f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2051,7 +2051,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager * } } - amdgpu_umc_poison_handler(adev, obj->head.block, false); + amdgpu_umc_poison_handler(adev, obj->head.block, 0); if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption) poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); @@ -2704,7 +2704,7 @@ static int amdgpu_ras_page_retirement_thread(void *param) atomic_dec(&con->page_retirement_req_cnt); amdgpu_umc_bad_page_polling_timeout(adev, - false, MAX_UMC_POISON_POLLING_TIME_ASYNC); + 0, MAX_UMC_POISON_POLLING_TIME_ASYNC); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 20436f81856a..f486510fc94c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -177,7 +177,7 @@ static void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev, static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, void *ras_error_status, struct amdgpu_iv_entry *entry, - bool reset) + uint32_t reset) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); @@ -186,9 +186,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, amdgpu_umc_handle_bad_pages(adev, ras_error_status); if 
(err_data->ue_count && reset) { - /* use mode-2 reset for poison consumption */ - if (!entry) - con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; + con->gpu_reset_flags |= reset; amdgpu_ras_reset_gpu(adev); } @@ -196,7 +194,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, } int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, - bool reset, uint32_t timeout_ms) + uint32_t reset, uint32_t timeout_ms) { struct ras_err_data err_data; struct ras_common_if head = { @@ -238,8 +236,7 @@ int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, if (reset) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - /* use mode-2 reset for poison consumption */ - con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET; + con->gpu_reset_flags |= reset; amdgpu_ras_reset_gpu(adev); } @@ -247,7 +244,7 @@ int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, } int amdgpu_umc_poison_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset) + enum amdgpu_ras_block block, uint32_t reset) { int ret = AMDGPU_RAS_SUCCESS; @@ -311,7 +308,8 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, void *ras_error_status, struct amdgpu_iv_entry *entry) { - return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true); + return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, + AMDGPU_RAS_GPU_RESET_MODE1_RESET); } int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 5954e839d580..563b0249247e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -101,7 +101,7 @@ struct amdgpu_umc { int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev); int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); int amdgpu_umc_poison_handler(struct amdgpu_device *adev, - enum amdgpu_ras_block block, bool reset); + enum amdgpu_ras_block block, uint32_t reset); int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); @@ -121,5 +121,5 @@ int amdgpu_umc_loop_channels(struct amdgpu_device *adev, umc_func func, void *data); int amdgpu_umc_bad_page_polling_timeout(struct amdgpu_device *adev, - bool reset, uint32_t timeout_ms); + uint32_t reset, uint32_t timeout_ms); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index 650da18b0d87..740f89aafbc0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -134,6 +134,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, { enum amdgpu_ras_block block = 0; int old_poison, ret = -EINVAL; + uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); if (!p) @@ -153,6 +154,8 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, case SOC15_IH_CLIENTID_UTCL2: ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; + if (ret) + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; case SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: @@ -160,6 +163,7 @@ static void event_interrupt_poison_consumption(struct kfd_node *dev, case SOC15_IH_CLIENTID_SDMA3: case SOC15_IH_CLIENTID_SDMA4: block = AMDGPU_RAS_BLOCK__SDMA; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; default: break; @@ -170,17 +174,16 @@ static void 
event_interrupt_poison_consumption(struct kfd_node *dev, /* resetting queue passes, do page retirement without gpu reset * resetting queue fails, fallback to gpu reset solution */ - if (!ret) { + if (!ret) dev_warn(dev->adev->dev, "RAS poison consumption, unmap queue flow succeeded: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); - } else { + else dev_warn(dev->adev->dev, "RAS poison consumption, fall back to gpu reset flow: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); - } + + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); } static bool event_interrupt_isr_v10(struct kfd_node *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index 7e2859736a55..d3d6b5c180b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -193,6 +193,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, { enum amdgpu_ras_block block = 0; int ret = -EINVAL; + uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); if (!p) @@ -212,10 +213,13 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, if (dev->dqm->ops.reset_queues) ret = dev->dqm->ops.reset_queues(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; + if (ret) + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; case SOC21_INTSRC_SDMA_ECC: default: block = AMDGPU_RAS_BLOCK__GFX; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; } @@ -223,10 +227,7 @@ static void event_interrupt_poison_consumption_v11(struct kfd_node *dev, /* resetting queue passes, do page retirement without gpu reset resetting queue fails, fallback to gpu reset solution */ - if (!ret) - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); - else - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); } static bool event_interrupt_isr_v11(struct kfd_node *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 11641f4645e6..2a37ab7a7150 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -145,6 +145,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, { enum amdgpu_ras_block block = 0; int old_poison, ret = -EINVAL; + uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); if (!p) @@ -164,6 +165,15 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_UTCL2: ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; + if (ret) + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + break; + case SOC15_IH_CLIENTID_VMC: + case SOC15_IH_CLIENTID_VMC1: + ret = kfd_dqm_evict_pasid(dev->dqm, pasid); + block = AMDGPU_RAS_BLOCK__MMHUB; + if (ret) + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; break; case SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: @@ -171,6 +181,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SDMA3: case SOC15_IH_CLIENTID_SDMA4: block = AMDGPU_RAS_BLOCK__SDMA; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; default: break; @@ -181,17 +192,16 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, /* resetting queue passes, do page retirement 
without gpu reset * resetting queue fails, fallback to gpu reset solution */ - if (!ret) { + if (!ret) dev_warn(dev->adev->dev, "RAS poison consumption, unmap queue flow succeeded: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false); - } else { + else dev_warn(dev->adev->dev, "RAS poison consumption, fall back to gpu reset flow: client id %d\n", client_id); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true); - } + + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); } static bool context_id_expected(struct kfd_dev *dev) -- cgit v1.2.3-70-g09d2 From 70dfaa3c02dd168e73e718aa0ab567c0232d58c6 Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Fri, 8 Mar 2024 16:36:39 +0800 Subject: drm/amdgpu: add VCN sensor value for SMU 13.0.5 This will add VCN sensor value for SMU 13.0.5. Signed-off-by: Xiaojian Du Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c index 0dce672ac1b9..218f209c3775 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c @@ -286,7 +286,7 @@ static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu, *value = metrics->GfxActivity / 100; break; case METRICS_AVERAGE_VCNACTIVITY: - *value = metrics->UvdActivity; + *value = metrics->UvdActivity / 100; break; case METRICS_CURR_SOCKETPOWER: *value = (metrics->CurrentSocketPower << 8) / 1000; @@ -332,6 +332,12 @@ static int smu_v13_0_5_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v13_0_5_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_INPUT_POWER: ret = smu_v13_0_5_get_smu_metrics_data(smu, METRICS_CURR_SOCKETPOWER, -- cgit v1.2.3-70-g09d2 From 865d3397630b806f2df156e2143cdfa416db1e01 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 21 Feb 2024 15:07:30 +0800 Subject: drm/amdgpu: add aca deferred error type support add aca deferred error type support Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 3 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index a0b7f0d0c513..611c1751577a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -434,6 +434,8 @@ static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_er amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, NULL, count); break; case ACA_ERROR_TYPE_DEFERRED: + amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, NULL, count); + break; default: break; } @@ -474,6 +476,7 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h smu_type = ACA_SMU_TYPE_UE; break; case ACA_ERROR_TYPE_CE: + case ACA_ERROR_TYPE_DEFERRED: smu_type = ACA_SMU_TYPE_CE; break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3c6d532824f6..2bc02b809246 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c 
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1291,8 +1291,8 @@ ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *a if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, - "ce", info.ce_count); + return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count, + "ce", info.ce_count, "de", info.ue_count); } static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, @@ -1341,6 +1341,10 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data); if (ret) return ret; + + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data); + if (ret) + return ret; } else { /* FIXME: add code to check return value later */ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx); -- cgit v1.2.3-70-g09d2 From b93d759f540a3d3145b985bbbdbeb98b22862df0 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Wed, 21 Feb 2024 14:36:13 +0800 Subject: drm/amdgpu: add umc v12.0.0 deferred error support add umc v12.0.0 deferred error support. Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 37 ++++++++++++---------------------- 1 file changed, 13 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index e2e18ccbbc62..d29ea2fde025 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -509,36 +509,25 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank { struct amdgpu_device *adev = handle->adev; struct aca_bank_info info; + enum aca_error_type err_type; u64 status; int ret; + status = bank->regs[ACA_REG_IDX_STATUS]; + if (umc_v12_0_is_deferred_error(adev, status)) + err_type = ACA_ERROR_TYPE_DEFERRED; + else if (umc_v12_0_is_uncorrectable_error(adev, status)) + err_type = ACA_ERROR_TYPE_UE; + else if (umc_v12_0_is_correctable_error(adev, status)) + err_type = ACA_ERROR_TYPE_CE; + else + return 0; + ret = aca_bank_info_decode(bank, &info); if (ret) return ret; - status = bank->regs[ACA_REG_IDX_STATUS]; - switch (type) { - case ACA_SMU_TYPE_UE: - if (umc_v12_0_is_uncorrectable_error(adev, status)) { - ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, - 1ULL); - if (ret) - return ret; - } - break; - case ACA_SMU_TYPE_CE: - if (umc_v12_0_is_correctable_error(adev, status)) { - ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, - 1ULL); - if (ret) - return ret; - } - break; - default: - return -EINVAL; - } - - return 0; + return aca_error_cache_log_bank_error(handle, &info, err_type, 1ULL); } static const struct aca_bank_ops umc_v12_0_aca_bank_ops = { @@ -547,7 +536,7 @@ static const struct aca_bank_ops umc_v12_0_aca_bank_ops = { const struct aca_info umc_v12_0_aca_info = { .hwip = ACA_HWIP_TYPE_UMC, - .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK, + .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK | ACA_ERROR_DEFERRED_MASK, .bank_ops = &umc_v12_0_aca_bank_ops, }; -- cgit v1.2.3-70-g09d2 From c9d7f802e698c03a06cce4bbfbbbeb7b7be26f8a Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 19 Dec 2023 19:13:00 +0800 Subject: drm/amdgpu: Add smuio v14_0_2 ip headers (v4) v1: Add smuio v14_0_2 register offset and shift masks header files. 
(Hawking) v2: Update smuio v14_0_2 register offset and shift masks header files to RE2. (Likun) v3: Update smuio v14_0_2 register offset and shift masks header files to RE2.5. (Likun) v4: Clean up smuio v14_0_2 ip headers (Alex) Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher Signed-off-by: Likun Gao Reviewed-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- .../include/asic_reg/smuio/smuio_14_0_2_offset.h | 511 +++++++++ .../include/asic_reg/smuio/smuio_14_0_2_sh_mask.h | 1106 ++++++++++++++++++++ 2 files changed, 1617 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h new file mode 100644 index 000000000000..da7e31fedd58 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h @@ -0,0 +1,511 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _smuio_14_0_2_OFFSET_HEADER +#define _smuio_14_0_2_OFFSET_HEADER + + + +// addressBlock: smuio_smuio_tsc_SmuSmuioDec +// base address: 0x5a8a0 +#define regPWROK_REFCLK_GAP_CYCLES 0x0028 +#define regPWROK_REFCLK_GAP_CYCLES_BASE_IDX 1 +#define regGOLDEN_TSC_INCREMENT_UPPER 0x002b +#define regGOLDEN_TSC_INCREMENT_UPPER_BASE_IDX 1 +#define regGOLDEN_TSC_INCREMENT_LOWER 0x002c +#define regGOLDEN_TSC_INCREMENT_LOWER_BASE_IDX 1 +#define regGOLDEN_TSC_COUNT_UPPER 0x002d +#define regGOLDEN_TSC_COUNT_UPPER_BASE_IDX 1 +#define regGOLDEN_TSC_COUNT_LOWER 0x002e +#define regGOLDEN_TSC_COUNT_LOWER_BASE_IDX 1 +#define regSOC_GOLDEN_TSC_SHADOW_UPPER 0x002f +#define regSOC_GOLDEN_TSC_SHADOW_UPPER_BASE_IDX 1 +#define regSOC_GOLDEN_TSC_SHADOW_LOWER 0x0030 +#define regSOC_GOLDEN_TSC_SHADOW_LOWER_BASE_IDX 1 +#define regSOC_GAP_PWROK 0x0031 +#define regSOC_GAP_PWROK_BASE_IDX 1 + + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +// base address: 0x5aca8 +#define regPWR_VIRT_RESET_REQ 0x012a +#define regPWR_VIRT_RESET_REQ_BASE_IDX 1 +#define regPWR_DISP_TIMER_CONTROL 0x012b +#define regPWR_DISP_TIMER_CONTROL_BASE_IDX 1 +#define regPWR_DISP_TIMER_DEBUG 0x012c +#define regPWR_DISP_TIMER_DEBUG_BASE_IDX 1 +#define regPWR_DISP_TIMER2_CONTROL 0x012d +#define regPWR_DISP_TIMER2_CONTROL_BASE_IDX 1 +#define regPWR_DISP_TIMER2_DEBUG 0x012e +#define regPWR_DISP_TIMER2_DEBUG_BASE_IDX 1 +#define regPWR_DISP_TIMER_GLOBAL_CONTROL 0x012f +#define regPWR_DISP_TIMER_GLOBAL_CONTROL_BASE_IDX 1 +#define regPWR_IH_CONTROL 0x0130 +#define regPWR_IH_CONTROL_BASE_IDX 1 + + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +// base address: 0x5a000 +#define regSMUIO_MCM_CONFIG 0x0023 +#define regSMUIO_MCM_CONFIG_BASE_IDX 0 +#define regIP_DISCOVERY_VERSION 0x0000 +#define regIP_DISCOVERY_VERSION_BASE_IDX 1 +#define regSCRATCH_REGISTER0 0x01bd +#define regSCRATCH_REGISTER0_BASE_IDX 1 +#define regSCRATCH_REGISTER1 0x01be +#define regSCRATCH_REGISTER1_BASE_IDX 1 +#define regSCRATCH_REGISTER2 0x01bf +#define regSCRATCH_REGISTER2_BASE_IDX 1 +#define regSCRATCH_REGISTER3 0x01c0 +#define regSCRATCH_REGISTER3_BASE_IDX 1 +#define regSCRATCH_REGISTER4 0x01c1 +#define regSCRATCH_REGISTER4_BASE_IDX 1 +#define regSCRATCH_REGISTER5 0x01c2 +#define regSCRATCH_REGISTER5_BASE_IDX 1 +#define regSCRATCH_REGISTER6 0x01c3 +#define regSCRATCH_REGISTER6_BASE_IDX 1 +#define regSCRATCH_REGISTER7 0x01c4 +#define regSCRATCH_REGISTER7_BASE_IDX 1 + + +// addressBlock: smuio_smuio_i2c_SmuSmuioDec +// base address: 0x5a100 +#define regCKSVII2C_IC_CON 0x0040 +#define regCKSVII2C_IC_CON_BASE_IDX 0 +#define regCKSVII2C_IC_TAR 0x0041 +#define regCKSVII2C_IC_TAR_BASE_IDX 0 +#define regCKSVII2C_IC_SAR 0x0042 +#define regCKSVII2C_IC_SAR_BASE_IDX 0 +#define regCKSVII2C_IC_HS_MADDR 0x0043 +#define regCKSVII2C_IC_HS_MADDR_BASE_IDX 0 +#define regCKSVII2C_IC_DATA_CMD 0x0044 +#define regCKSVII2C_IC_DATA_CMD_BASE_IDX 0 +#define regCKSVII2C_IC_SS_SCL_HCNT 0x0045 +#define regCKSVII2C_IC_SS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C_IC_SS_SCL_LCNT 0x0046 +#define regCKSVII2C_IC_SS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C_IC_FS_SCL_HCNT 0x0047 +#define regCKSVII2C_IC_FS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C_IC_FS_SCL_LCNT 0x0048 +#define regCKSVII2C_IC_FS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C_IC_HS_SCL_HCNT 0x0049 +#define regCKSVII2C_IC_HS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C_IC_HS_SCL_LCNT 0x004a +#define regCKSVII2C_IC_HS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C_IC_INTR_STAT 0x004b +#define regCKSVII2C_IC_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C_IC_INTR_MASK 
0x004c +#define regCKSVII2C_IC_INTR_MASK_BASE_IDX 0 +#define regCKSVII2C_IC_RAW_INTR_STAT 0x004d +#define regCKSVII2C_IC_RAW_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C_IC_RX_TL 0x004e +#define regCKSVII2C_IC_RX_TL_BASE_IDX 0 +#define regCKSVII2C_IC_TX_TL 0x004f +#define regCKSVII2C_IC_TX_TL_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_INTR 0x0050 +#define regCKSVII2C_IC_CLR_INTR_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RX_UNDER 0x0051 +#define regCKSVII2C_IC_CLR_RX_UNDER_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RX_OVER 0x0052 +#define regCKSVII2C_IC_CLR_RX_OVER_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_TX_OVER 0x0053 +#define regCKSVII2C_IC_CLR_TX_OVER_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RD_REQ 0x0054 +#define regCKSVII2C_IC_CLR_RD_REQ_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_TX_ABRT 0x0055 +#define regCKSVII2C_IC_CLR_TX_ABRT_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RX_DONE 0x0056 +#define regCKSVII2C_IC_CLR_RX_DONE_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_ACTIVITY 0x0057 +#define regCKSVII2C_IC_CLR_ACTIVITY_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_STOP_DET 0x0058 +#define regCKSVII2C_IC_CLR_STOP_DET_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_START_DET 0x0059 +#define regCKSVII2C_IC_CLR_START_DET_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_GEN_CALL 0x005a +#define regCKSVII2C_IC_CLR_GEN_CALL_BASE_IDX 0 +#define regCKSVII2C_IC_ENABLE 0x005b +#define regCKSVII2C_IC_ENABLE_BASE_IDX 0 +#define regCKSVII2C_IC_STATUS 0x005c +#define regCKSVII2C_IC_STATUS_BASE_IDX 0 +#define regCKSVII2C_IC_TXFLR 0x005d +#define regCKSVII2C_IC_TXFLR_BASE_IDX 0 +#define regCKSVII2C_IC_RXFLR 0x005e +#define regCKSVII2C_IC_RXFLR_BASE_IDX 0 +#define regCKSVII2C_IC_SDA_HOLD 0x005f +#define regCKSVII2C_IC_SDA_HOLD_BASE_IDX 0 +#define regCKSVII2C_IC_TX_ABRT_SOURCE 0x0060 +#define regCKSVII2C_IC_TX_ABRT_SOURCE_BASE_IDX 0 +#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY 0x0061 +#define regCKSVII2C_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0 +#define regCKSVII2C_IC_DMA_CR 0x0062 +#define regCKSVII2C_IC_DMA_CR_BASE_IDX 0 +#define regCKSVII2C_IC_DMA_TDLR 0x0063 +#define regCKSVII2C_IC_DMA_TDLR_BASE_IDX 0 +#define regCKSVII2C_IC_DMA_RDLR 0x0064 +#define regCKSVII2C_IC_DMA_RDLR_BASE_IDX 0 +#define regCKSVII2C_IC_SDA_SETUP 0x0065 +#define regCKSVII2C_IC_SDA_SETUP_BASE_IDX 0 +#define regCKSVII2C_IC_ACK_GENERAL_CALL 0x0066 +#define regCKSVII2C_IC_ACK_GENERAL_CALL_BASE_IDX 0 +#define regCKSVII2C_IC_ENABLE_STATUS 0x0067 +#define regCKSVII2C_IC_ENABLE_STATUS_BASE_IDX 0 +#define regCKSVII2C_IC_FS_SPKLEN 0x0068 +#define regCKSVII2C_IC_FS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C_IC_HS_SPKLEN 0x0069 +#define regCKSVII2C_IC_HS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C_IC_CLR_RESTART_DET 0x006a +#define regCKSVII2C_IC_CLR_RESTART_DET_BASE_IDX 0 +#define regCKSVII2C_IC_COMP_PARAM_1 0x006d +#define regCKSVII2C_IC_COMP_PARAM_1_BASE_IDX 0 +#define regCKSVII2C_IC_COMP_VERSION 0x006e +#define regCKSVII2C_IC_COMP_VERSION_BASE_IDX 0 +#define regCKSVII2C_IC_COMP_TYPE 0x006f +#define regCKSVII2C_IC_COMP_TYPE_BASE_IDX 0 +#define regCKSVII2C1_IC_CON 0x0080 +#define regCKSVII2C1_IC_CON_BASE_IDX 0 +#define regCKSVII2C1_IC_TAR 0x0081 +#define regCKSVII2C1_IC_TAR_BASE_IDX 0 +#define regCKSVII2C1_IC_SAR 0x0082 +#define regCKSVII2C1_IC_SAR_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_MADDR 0x0083 +#define regCKSVII2C1_IC_HS_MADDR_BASE_IDX 0 +#define regCKSVII2C1_IC_DATA_CMD 0x0084 +#define regCKSVII2C1_IC_DATA_CMD_BASE_IDX 0 +#define regCKSVII2C1_IC_SS_SCL_HCNT 0x0085 +#define regCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_SS_SCL_LCNT 0x0086 +#define regCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0 
+#define regCKSVII2C1_IC_FS_SCL_HCNT 0x0087 +#define regCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_FS_SCL_LCNT 0x0088 +#define regCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_SCL_HCNT 0x0089 +#define regCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_SCL_LCNT 0x008a +#define regCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0 +#define regCKSVII2C1_IC_INTR_STAT 0x008b +#define regCKSVII2C1_IC_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C1_IC_INTR_MASK 0x008c +#define regCKSVII2C1_IC_INTR_MASK_BASE_IDX 0 +#define regCKSVII2C1_IC_RAW_INTR_STAT 0x008d +#define regCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0 +#define regCKSVII2C1_IC_RX_TL 0x008e +#define regCKSVII2C1_IC_RX_TL_BASE_IDX 0 +#define regCKSVII2C1_IC_TX_TL 0x008f +#define regCKSVII2C1_IC_TX_TL_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_INTR 0x0090 +#define regCKSVII2C1_IC_CLR_INTR_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RX_UNDER 0x0091 +#define regCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RX_OVER 0x0092 +#define regCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_TX_OVER 0x0093 +#define regCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RD_REQ 0x0094 +#define regCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_TX_ABRT 0x0095 +#define regCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RX_DONE 0x0096 +#define regCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_ACTIVITY 0x0097 +#define regCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_STOP_DET 0x0098 +#define regCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_START_DET 0x0099 +#define regCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_GEN_CALL 0x009a +#define regCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0 +#define regCKSVII2C1_IC_ENABLE 0x009b +#define regCKSVII2C1_IC_ENABLE_BASE_IDX 0 +#define regCKSVII2C1_IC_STATUS 0x009c +#define regCKSVII2C1_IC_STATUS_BASE_IDX 0 +#define regCKSVII2C1_IC_TXFLR 0x009d +#define regCKSVII2C1_IC_TXFLR_BASE_IDX 0 +#define regCKSVII2C1_IC_RXFLR 0x009e +#define regCKSVII2C1_IC_RXFLR_BASE_IDX 0 +#define regCKSVII2C1_IC_SDA_HOLD 0x009f +#define regCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0 +#define regCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0 +#define regCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0 +#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1 +#define regCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0 +#define regCKSVII2C1_IC_DMA_CR 0x00a2 +#define regCKSVII2C1_IC_DMA_CR_BASE_IDX 0 +#define regCKSVII2C1_IC_DMA_TDLR 0x00a3 +#define regCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0 +#define regCKSVII2C1_IC_DMA_RDLR 0x00a4 +#define regCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0 +#define regCKSVII2C1_IC_SDA_SETUP 0x00a5 +#define regCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0 +#define regCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6 +#define regCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0 +#define regCKSVII2C1_IC_ENABLE_STATUS 0x00a7 +#define regCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0 +#define regCKSVII2C1_IC_FS_SPKLEN 0x00a8 +#define regCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C1_IC_HS_SPKLEN 0x00a9 +#define regCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0 +#define regCKSVII2C1_IC_CLR_RESTART_DET 0x00aa +#define regCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0 +#define regCKSVII2C1_IC_COMP_PARAM_1 0x00ad +#define regCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0 +#define regCKSVII2C1_IC_COMP_VERSION 0x00ae +#define regCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0 +#define regCKSVII2C1_IC_COMP_TYPE 0x00af +#define regCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0 +#define regSMUIO_PWRMGT 0x018c 
+#define regSMUIO_PWRMGT_BASE_IDX 0 + + +// addressBlock: smuio_smuio_rom_SmuSmuioDec +// base address: 0x5a380 +#define regROM_CNTL 0x00e0 +#define regROM_CNTL_BASE_IDX 0 +#define regPAGE_MIRROR_CNTL 0x00e1 +#define regPAGE_MIRROR_CNTL_BASE_IDX 0 +#define regROM_STATUS 0x00e2 +#define regROM_STATUS_BASE_IDX 0 +#define regCGTT_ROM_CLK_CTRL0 0x00e3 +#define regCGTT_ROM_CLK_CTRL0_BASE_IDX 0 +#define regROM_INDEX 0x00e4 +#define regROM_INDEX_BASE_IDX 0 +#define regROM_DATA 0x00e5 +#define regROM_DATA_BASE_IDX 0 +#define regROM_START 0x00e6 +#define regROM_START_BASE_IDX 0 +#define regROM_SW_CNTL 0x00e8 +#define regROM_SW_CNTL_BASE_IDX 0 +#define regROM_SW_STATUS 0x00e9 +#define regROM_SW_STATUS_BASE_IDX 0 +#define regROM_SW_COMMAND 0x00ea +#define regROM_SW_COMMAND_BASE_IDX 0 +#define regROM_SW_DATA_1 0x00ec +#define regROM_SW_DATA_1_BASE_IDX 0 +#define regROM_SW_DATA_2 0x00ed +#define regROM_SW_DATA_2_BASE_IDX 0 +#define regROM_SW_DATA_3 0x00ee +#define regROM_SW_DATA_3_BASE_IDX 0 +#define regROM_SW_DATA_4 0x00ef +#define regROM_SW_DATA_4_BASE_IDX 0 +#define regROM_SW_DATA_5 0x00f0 +#define regROM_SW_DATA_5_BASE_IDX 0 +#define regROM_SW_DATA_6 0x00f1 +#define regROM_SW_DATA_6_BASE_IDX 0 +#define regROM_SW_DATA_7 0x00f2 +#define regROM_SW_DATA_7_BASE_IDX 0 +#define regROM_SW_DATA_8 0x00f3 +#define regROM_SW_DATA_8_BASE_IDX 0 +#define regROM_SW_DATA_9 0x00f4 +#define regROM_SW_DATA_9_BASE_IDX 0 +#define regROM_SW_DATA_10 0x00f5 +#define regROM_SW_DATA_10_BASE_IDX 0 +#define regROM_SW_DATA_11 0x00f6 +#define regROM_SW_DATA_11_BASE_IDX 0 +#define regROM_SW_DATA_12 0x00f7 +#define regROM_SW_DATA_12_BASE_IDX 0 +#define regROM_SW_DATA_13 0x00f8 +#define regROM_SW_DATA_13_BASE_IDX 0 +#define regROM_SW_DATA_14 0x00f9 +#define regROM_SW_DATA_14_BASE_IDX 0 +#define regROM_SW_DATA_15 0x00fa +#define regROM_SW_DATA_15_BASE_IDX 0 +#define regROM_SW_DATA_16 0x00fb +#define regROM_SW_DATA_16_BASE_IDX 0 +#define regROM_SW_DATA_17 0x00fc +#define regROM_SW_DATA_17_BASE_IDX 0 +#define regROM_SW_DATA_18 0x00fd +#define regROM_SW_DATA_18_BASE_IDX 0 +#define regROM_SW_DATA_19 0x00fe +#define regROM_SW_DATA_19_BASE_IDX 0 +#define regROM_SW_DATA_20 0x00ff +#define regROM_SW_DATA_20_BASE_IDX 0 +#define regROM_SW_DATA_21 0x0100 +#define regROM_SW_DATA_21_BASE_IDX 0 +#define regROM_SW_DATA_22 0x0101 +#define regROM_SW_DATA_22_BASE_IDX 0 +#define regROM_SW_DATA_23 0x0102 +#define regROM_SW_DATA_23_BASE_IDX 0 +#define regROM_SW_DATA_24 0x0103 +#define regROM_SW_DATA_24_BASE_IDX 0 +#define regROM_SW_DATA_25 0x0104 +#define regROM_SW_DATA_25_BASE_IDX 0 +#define regROM_SW_DATA_26 0x0105 +#define regROM_SW_DATA_26_BASE_IDX 0 +#define regROM_SW_DATA_27 0x0106 +#define regROM_SW_DATA_27_BASE_IDX 0 +#define regROM_SW_DATA_28 0x0107 +#define regROM_SW_DATA_28_BASE_IDX 0 +#define regROM_SW_DATA_29 0x0108 +#define regROM_SW_DATA_29_BASE_IDX 0 +#define regROM_SW_DATA_30 0x0109 +#define regROM_SW_DATA_30_BASE_IDX 0 +#define regROM_SW_DATA_31 0x010a +#define regROM_SW_DATA_31_BASE_IDX 0 +#define regROM_SW_DATA_32 0x010b +#define regROM_SW_DATA_32_BASE_IDX 0 +#define regROM_SW_DATA_33 0x010c +#define regROM_SW_DATA_33_BASE_IDX 0 +#define regROM_SW_DATA_34 0x010d +#define regROM_SW_DATA_34_BASE_IDX 0 +#define regROM_SW_DATA_35 0x010e +#define regROM_SW_DATA_35_BASE_IDX 0 +#define regROM_SW_DATA_36 0x010f +#define regROM_SW_DATA_36_BASE_IDX 0 +#define regROM_SW_DATA_37 0x0110 +#define regROM_SW_DATA_37_BASE_IDX 0 +#define regROM_SW_DATA_38 0x0111 +#define regROM_SW_DATA_38_BASE_IDX 0 +#define regROM_SW_DATA_39 0x0112 +#define 
regROM_SW_DATA_39_BASE_IDX 0 +#define regROM_SW_DATA_40 0x0113 +#define regROM_SW_DATA_40_BASE_IDX 0 +#define regROM_SW_DATA_41 0x0114 +#define regROM_SW_DATA_41_BASE_IDX 0 +#define regROM_SW_DATA_42 0x0115 +#define regROM_SW_DATA_42_BASE_IDX 0 +#define regROM_SW_DATA_43 0x0116 +#define regROM_SW_DATA_43_BASE_IDX 0 +#define regROM_SW_DATA_44 0x0117 +#define regROM_SW_DATA_44_BASE_IDX 0 +#define regROM_SW_DATA_45 0x0118 +#define regROM_SW_DATA_45_BASE_IDX 0 +#define regROM_SW_DATA_46 0x0119 +#define regROM_SW_DATA_46_BASE_IDX 0 +#define regROM_SW_DATA_47 0x011a +#define regROM_SW_DATA_47_BASE_IDX 0 +#define regROM_SW_DATA_48 0x011b +#define regROM_SW_DATA_48_BASE_IDX 0 +#define regROM_SW_DATA_49 0x011c +#define regROM_SW_DATA_49_BASE_IDX 0 +#define regROM_SW_DATA_50 0x011d +#define regROM_SW_DATA_50_BASE_IDX 0 +#define regROM_SW_DATA_51 0x011e +#define regROM_SW_DATA_51_BASE_IDX 0 +#define regROM_SW_DATA_52 0x011f +#define regROM_SW_DATA_52_BASE_IDX 0 +#define regROM_SW_DATA_53 0x0120 +#define regROM_SW_DATA_53_BASE_IDX 0 +#define regROM_SW_DATA_54 0x0121 +#define regROM_SW_DATA_54_BASE_IDX 0 +#define regROM_SW_DATA_55 0x0122 +#define regROM_SW_DATA_55_BASE_IDX 0 +#define regROM_SW_DATA_56 0x0123 +#define regROM_SW_DATA_56_BASE_IDX 0 +#define regROM_SW_DATA_57 0x0124 +#define regROM_SW_DATA_57_BASE_IDX 0 +#define regROM_SW_DATA_58 0x0125 +#define regROM_SW_DATA_58_BASE_IDX 0 +#define regROM_SW_DATA_59 0x0126 +#define regROM_SW_DATA_59_BASE_IDX 0 +#define regROM_SW_DATA_60 0x0127 +#define regROM_SW_DATA_60_BASE_IDX 0 +#define regROM_SW_DATA_61 0x0128 +#define regROM_SW_DATA_61_BASE_IDX 0 +#define regROM_SW_DATA_62 0x0129 +#define regROM_SW_DATA_62_BASE_IDX 0 +#define regROM_SW_DATA_63 0x012a +#define regROM_SW_DATA_63_BASE_IDX 0 +#define regROM_SW_DATA_64 0x012b +#define regROM_SW_DATA_64_BASE_IDX 0 + + +// addressBlock: smuio_smuio_gpio_SmuSmuioDec +// base address: 0x5a500 +#define regSMU_GPIOPAD_SW_INT_STAT 0x0140 +#define regSMU_GPIOPAD_SW_INT_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MASK 0x0141 +#define regSMU_GPIOPAD_MASK_BASE_IDX 0 +#define regSMU_GPIOPAD_A 0x0142 +#define regSMU_GPIOPAD_A_BASE_IDX 0 +#define regSMU_GPIOPAD_TXIMPSEL 0x0143 +#define regSMU_GPIOPAD_TXIMPSEL_BASE_IDX 0 +#define regSMU_GPIOPAD_EN 0x0144 +#define regSMU_GPIOPAD_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_Y 0x0145 +#define regSMU_GPIOPAD_Y_BASE_IDX 0 +#define regSMU_GPIOPAD_RXEN 0x0146 +#define regSMU_GPIOPAD_RXEN_BASE_IDX 0 +#define regSMU_GPIOPAD_RCVR_SEL0 0x0147 +#define regSMU_GPIOPAD_RCVR_SEL0_BASE_IDX 0 +#define regSMU_GPIOPAD_RCVR_SEL1 0x0148 +#define regSMU_GPIOPAD_RCVR_SEL1_BASE_IDX 0 +#define regSMU_GPIOPAD_PU_EN 0x0149 +#define regSMU_GPIOPAD_PU_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_PD_EN 0x014a +#define regSMU_GPIOPAD_PD_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_PINSTRAPS 0x014b +#define regSMU_GPIOPAD_PINSTRAPS_BASE_IDX 0 +#define regDFT_PINSTRAPS 0x014c +#define regDFT_PINSTRAPS_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_STAT_EN 0x014d +#define regSMU_GPIOPAD_INT_STAT_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_STAT 0x014e +#define regSMU_GPIOPAD_INT_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_STAT_AK 0x014f +#define regSMU_GPIOPAD_INT_STAT_AK_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_EN 0x0150 +#define regSMU_GPIOPAD_INT_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_TYPE 0x0151 +#define regSMU_GPIOPAD_INT_TYPE_BASE_IDX 0 +#define regSMU_GPIOPAD_INT_POLARITY 0x0152 +#define regSMU_GPIOPAD_INT_POLARITY_BASE_IDX 0 +#define regSMUIO_PCC_GPIO_SELECT 0x0155 +#define regSMUIO_PCC_GPIO_SELECT_BASE_IDX 0 +#define 
regSMU_GPIOPAD_S0 0x0156 +#define regSMU_GPIOPAD_S0_BASE_IDX 0 +#define regSMU_GPIOPAD_S1 0x0157 +#define regSMU_GPIOPAD_S1_BASE_IDX 0 +#define regSMU_GPIOPAD_SCHMEN 0x0158 +#define regSMU_GPIOPAD_SCHMEN_BASE_IDX 0 +#define regSMU_GPIOPAD_SCL_EN 0x0159 +#define regSMU_GPIOPAD_SCL_EN_BASE_IDX 0 +#define regSMU_GPIOPAD_SDA_EN 0x015a +#define regSMU_GPIOPAD_SDA_EN_BASE_IDX 0 +#define regSMUIO_GPIO_INT0_SELECT 0x015b +#define regSMUIO_GPIO_INT0_SELECT_BASE_IDX 0 +#define regSMUIO_GPIO_INT1_SELECT 0x015c +#define regSMUIO_GPIO_INT1_SELECT_BASE_IDX 0 +#define regSMUIO_GPIO_INT2_SELECT 0x015d +#define regSMUIO_GPIO_INT2_SELECT_BASE_IDX 0 +#define regSMUIO_GPIO_INT3_SELECT 0x015e +#define regSMUIO_GPIO_INT3_SELECT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT0_STAT 0x015f +#define regSMU_GPIOPAD_MP_INT0_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT1_STAT 0x0160 +#define regSMU_GPIOPAD_MP_INT1_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT2_STAT 0x0161 +#define regSMU_GPIOPAD_MP_INT2_STAT_BASE_IDX 0 +#define regSMU_GPIOPAD_MP_INT3_STAT 0x0162 +#define regSMU_GPIOPAD_MP_INT3_STAT_BASE_IDX 0 +#define regSMIO_INDEX 0x0163 +#define regSMIO_INDEX_BASE_IDX 0 +#define regS0_VID_SMIO_CNTL 0x0164 +#define regS0_VID_SMIO_CNTL_BASE_IDX 0 +#define regS1_VID_SMIO_CNTL 0x0165 +#define regS1_VID_SMIO_CNTL_BASE_IDX 0 +#define regOPEN_DRAIN_SELECT 0x0166 +#define regOPEN_DRAIN_SELECT_BASE_IDX 0 +#define regSMIO_ENABLE 0x0167 +#define regSMIO_ENABLE_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h new file mode 100644 index 000000000000..6204505e553b --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h @@ -0,0 +1,1106 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _smuio_14_0_2_SH_MASK_HEADER +#define _smuio_14_0_2_SH_MASK_HEADER + + +// addressBlock: smuio_smuio_tsc_SmuSmuioDec +//PWROK_REFCLK_GAP_CYCLES +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles__SHIFT 0x0 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles__SHIFT 0x8 +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PreAssertion_clkgap_cycles_MASK 0x000000FFL +#define PWROK_REFCLK_GAP_CYCLES__Pwrok_PostAssertion_clkgap_cycles_MASK 0x0000FF00L +//GOLDEN_TSC_INCREMENT_UPPER +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_UPPER__GoldenTscIncrementUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_INCREMENT_LOWER +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower__SHIFT 0x0 +#define GOLDEN_TSC_INCREMENT_LOWER__GoldenTscIncrementLower_MASK 0xFFFFFFFFL +//GOLDEN_TSC_COUNT_UPPER +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_UPPER__GoldenTscCountUpper_MASK 0x00FFFFFFL +//GOLDEN_TSC_COUNT_LOWER +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower__SHIFT 0x0 +#define GOLDEN_TSC_COUNT_LOWER__GoldenTscCountLower_MASK 0xFFFFFFFFL +//SOC_GOLDEN_TSC_SHADOW_UPPER +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_UPPER__SocGoldenTscShadowUpper_MASK 0x00FFFFFFL +//SOC_GOLDEN_TSC_SHADOW_LOWER +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower__SHIFT 0x0 +#define SOC_GOLDEN_TSC_SHADOW_LOWER__SocGoldenTscShadowLower_MASK 0xFFFFFFFFL +//SOC_GAP_PWROK +#define SOC_GAP_PWROK__soc_gap_pwrok__SHIFT 0x0 +#define SOC_GAP_PWROK__soc_gap_pwrok_MASK 0x00000001L + + +// addressBlock: smuio_smuio_swtimer_SmuSmuioDec +//PWR_VIRT_RESET_REQ +#define PWR_VIRT_RESET_REQ__VF_FLR__SHIFT 0x0 +#define PWR_VIRT_RESET_REQ__PF_FLR__SHIFT 0x1f +#define PWR_VIRT_RESET_REQ__VF_FLR_MASK 0x7FFFFFFFL +#define PWR_VIRT_RESET_REQ__PF_FLR_MASK 0x80000000L +//PWR_DISP_TIMER_CONTROL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER_DEBUG +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_INT_MASK 0x00000004L +#define PWR_DISP_TIMER_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L +//PWR_DISP_TIMER2_CONTROL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT__SHIFT 0x0 
+#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE__SHIFT 0x19 +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE__SHIFT 0x1a +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK__SHIFT 0x1b +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK__SHIFT 0x1c +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE__SHIFT 0x1d +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE__SHIFT 0x1e +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_COUNT_MASK 0x01FFFFFFL +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_ENABLE_MASK 0x02000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_DISABLE_MASK 0x04000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MASK_MASK 0x08000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_STAT_AK_MASK 0x10000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_TYPE_MASK 0x20000000L +#define PWR_DISP_TIMER2_CONTROL__DISP_TIMER_INT_MODE_MASK 0x40000000L +//PWR_DISP_TIMER2_DEBUG +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING__SHIFT 0x0 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT__SHIFT 0x1 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT__SHIFT 0x2 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL__SHIFT 0x7 +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_RUNNING_MASK 0x00000001L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_STAT_MASK 0x00000002L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_INT_MASK 0x00000004L +#define PWR_DISP_TIMER2_DEBUG__DISP_TIMER_RUN_VAL_MASK 0xFFFFFF80L +//PWR_DISP_TIMER_GLOBAL_CONTROL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH__SHIFT 0x0 +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN__SHIFT 0xa +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_WIDTH_MASK 0x000003FFL +#define PWR_DISP_TIMER_GLOBAL_CONTROL__DISP_TIMER_PULSE_EN_MASK 0x00000400L +//PWR_IH_CONTROL +#define PWR_IH_CONTROL__MAX_CREDIT__SHIFT 0x0 +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK__SHIFT 0x5 +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK__SHIFT 0x6 +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN__SHIFT 0x1f +#define PWR_IH_CONTROL__MAX_CREDIT_MASK 0x0000001FL +#define PWR_IH_CONTROL__DISP_TIMER_TRIGGER_MASK_MASK 0x00000020L +#define PWR_IH_CONTROL__DISP_TIMER2_TRIGGER_MASK_MASK 0x00000040L +#define PWR_IH_CONTROL__PWR_IH_CLK_GATE_EN_MASK 0x80000000L + + +// addressBlock: smuio_smuio_misc_SmuSmuioDec +//SMUIO_MCM_CONFIG +#define SMUIO_MCM_CONFIG__DIE_ID__SHIFT 0x0 +#define SMUIO_MCM_CONFIG__PKG_TYPE__SHIFT 0x2 +#define SMUIO_MCM_CONFIG__SOCKET_ID__SHIFT 0x8 +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE__SHIFT 0xc +#define SMUIO_MCM_CONFIG__DIE_CONFIG__SHIFT 0xd +#define SMUIO_MCM_CONFIG__CONSOLE_K__SHIFT 0x10 +#define SMUIO_MCM_CONFIG__CONSOLE_A__SHIFT 0x11 +#define SMUIO_MCM_CONFIG__DIE_ID_MASK 0x00000003L +#define SMUIO_MCM_CONFIG__PKG_TYPE_MASK 0x0000001CL +#define SMUIO_MCM_CONFIG__SOCKET_ID_MASK 0x00000300L +#define SMUIO_MCM_CONFIG__PKG_SUBTYPE_MASK 0x00001000L +#define SMUIO_MCM_CONFIG__CONSOLE_K_MASK 0x00010000L +#define SMUIO_MCM_CONFIG__CONSOLE_A_MASK 0x00020000L +//IP_DISCOVERY_VERSION +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION__SHIFT 0x0 +#define IP_DISCOVERY_VERSION__IP_DISCOVERY_VERSION_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER0 +#define SCRATCH_REGISTER0__ScratchPad0__SHIFT 0x0 +#define SCRATCH_REGISTER0__ScratchPad0_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER1 +#define SCRATCH_REGISTER1__ScratchPad1__SHIFT 0x0 +#define SCRATCH_REGISTER1__ScratchPad1_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER2 +#define SCRATCH_REGISTER2__ScratchPad2__SHIFT 0x0 +#define SCRATCH_REGISTER2__ScratchPad2_MASK 0xFFFFFFFFL 
+//SCRATCH_REGISTER3 +#define SCRATCH_REGISTER3__ScratchPad3__SHIFT 0x0 +#define SCRATCH_REGISTER3__ScratchPad3_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER4 +#define SCRATCH_REGISTER4__ScratchPad4__SHIFT 0x0 +#define SCRATCH_REGISTER4__ScratchPad4_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER5 +#define SCRATCH_REGISTER5__ScratchPad5__SHIFT 0x0 +#define SCRATCH_REGISTER5__ScratchPad5_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER6 +#define SCRATCH_REGISTER6__ScratchPad6__SHIFT 0x0 +#define SCRATCH_REGISTER6__ScratchPad6_MASK 0xFFFFFFFFL +//SCRATCH_REGISTER7 +#define SCRATCH_REGISTER7__ScratchPad7__SHIFT 0x0 +#define SCRATCH_REGISTER7__ScratchPad7_MASK 0xFFFFFFFFL + + +// addressBlock: smuio_smuio_i2c_SmuSmuioDec +//CKSVII2C_IC_CON +#define CKSVII2C_IC_CON__IC_MASTER_MODE__SHIFT 0x0 +#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE__SHIFT 0x1 +#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE__SHIFT 0x3 +#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER__SHIFT 0x4 +#define CKSVII2C_IC_CON__IC_RESTART_EN__SHIFT 0x5 +#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE__SHIFT 0x6 +#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED__SHIFT 0x7 +#define CKSVII2C_IC_CON__TX_EMPTY_CTRL__SHIFT 0x8 +#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL__SHIFT 0x9 +#define CKSVII2C_IC_CON__BUS_CLEAR_FEATURE_CTRL__SHIFT 0xb +#define CKSVII2C_IC_CON__IC_MASTER_MODE_MASK 0x00000001L +#define CKSVII2C_IC_CON__IC_MAX_SPEED_MODE_MASK 0x00000006L +#define CKSVII2C_IC_CON__IC_10BITADDR_SLAVE_MASK 0x00000008L +#define CKSVII2C_IC_CON__IC_10BITADDR_MASTER_MASK 0x00000010L +#define CKSVII2C_IC_CON__IC_RESTART_EN_MASK 0x00000020L +#define CKSVII2C_IC_CON__IC_SLAVE_DISABLE_MASK 0x00000040L +#define CKSVII2C_IC_CON__STOP_DET_IFADDRESSED_MASK 0x00000080L +#define CKSVII2C_IC_CON__TX_EMPTY_CTRL_MASK 0x00000100L +#define CKSVII2C_IC_CON__RX_FIFO_FULL_HLD_CTRL_MASK 0x00000200L +//CKSVII2C_IC_TAR +#define CKSVII2C_IC_TAR__IC_TAR__SHIFT 0x0 +#define CKSVII2C_IC_TAR__GC_OR_START__SHIFT 0xa +#define CKSVII2C_IC_TAR__SPECIAL__SHIFT 0xb +#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER__SHIFT 0xc +#define CKSVII2C_IC_TAR__IC_TAR_MASK 0x000003FFL +#define CKSVII2C_IC_TAR__GC_OR_START_MASK 0x00000400L +#define CKSVII2C_IC_TAR__SPECIAL_MASK 0x00000800L +#define CKSVII2C_IC_TAR__IC_10BITADDR_MASTER_MASK 0x00001000L +//CKSVII2C_IC_SAR +#define CKSVII2C_IC_SAR__IC_SAR__SHIFT 0x0 +#define CKSVII2C_IC_SAR__IC_SAR_MASK 0x000003FFL +//CKSVII2C_IC_HS_MADDR +#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR__SHIFT 0x0 +#define CKSVII2C_IC_HS_MADDR__IC_HS_MADDR_MASK 0x00000007L +//CKSVII2C_IC_DATA_CMD +#define CKSVII2C_IC_DATA_CMD__DAT__SHIFT 0x0 +#define CKSVII2C_IC_DATA_CMD__CMD__SHIFT 0x8 +#define CKSVII2C_IC_DATA_CMD__STOP__SHIFT 0x9 +#define CKSVII2C_IC_DATA_CMD__RESTART__SHIFT 0xa +#define CKSVII2C_IC_DATA_CMD__FIRST_DATA_BYTE__SHIFT 0xb +#define CKSVII2C_IC_DATA_CMD__DAT_MASK 0x000000FFL +#define CKSVII2C_IC_DATA_CMD__CMD_MASK 0x00000100L +#define CKSVII2C_IC_DATA_CMD__STOP_MASK 0x00000200L +#define CKSVII2C_IC_DATA_CMD__RESTART_MASK 0x00000400L +//CKSVII2C_IC_SS_SCL_HCNT +#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C_IC_SS_SCL_HCNT__IC_SS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_SS_SCL_LCNT +#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C_IC_SS_SCL_LCNT__IC_SS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_FS_SCL_HCNT +#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C_IC_FS_SCL_HCNT__IC_FS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_FS_SCL_LCNT +#define CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT__SHIFT 0x0 +#define 
CKSVII2C_IC_FS_SCL_LCNT__IC_FS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_HS_SCL_HCNT +#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C_IC_HS_SCL_HCNT__IC_HS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_HS_SCL_LCNT +#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C_IC_HS_SCL_LCNT__IC_HS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C_IC_INTR_STAT +#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER__SHIFT 0x0 +#define CKSVII2C_IC_INTR_STAT__R_RX_OVER__SHIFT 0x1 +#define CKSVII2C_IC_INTR_STAT__R_RX_FULL__SHIFT 0x2 +#define CKSVII2C_IC_INTR_STAT__R_TX_OVER__SHIFT 0x3 +#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C_IC_INTR_STAT__R_RD_REQ__SHIFT 0x5 +#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT__SHIFT 0x6 +#define CKSVII2C_IC_INTR_STAT__R_RX_DONE__SHIFT 0x7 +#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY__SHIFT 0x8 +#define CKSVII2C_IC_INTR_STAT__R_STOP_DET__SHIFT 0x9 +#define CKSVII2C_IC_INTR_STAT__R_START_DET__SHIFT 0xa +#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL__SHIFT 0xb +#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET__SHIFT 0xc +#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C_IC_INTR_STAT__R_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C_IC_INTR_STAT__R_RX_UNDER_MASK 0x00000001L +#define CKSVII2C_IC_INTR_STAT__R_RX_OVER_MASK 0x00000002L +#define CKSVII2C_IC_INTR_STAT__R_RX_FULL_MASK 0x00000004L +#define CKSVII2C_IC_INTR_STAT__R_TX_OVER_MASK 0x00000008L +#define CKSVII2C_IC_INTR_STAT__R_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C_IC_INTR_STAT__R_RD_REQ_MASK 0x00000020L +#define CKSVII2C_IC_INTR_STAT__R_TX_ABRT_MASK 0x00000040L +#define CKSVII2C_IC_INTR_STAT__R_RX_DONE_MASK 0x00000080L +#define CKSVII2C_IC_INTR_STAT__R_ACTIVITY_MASK 0x00000100L +#define CKSVII2C_IC_INTR_STAT__R_STOP_DET_MASK 0x00000200L +#define CKSVII2C_IC_INTR_STAT__R_START_DET_MASK 0x00000400L +#define CKSVII2C_IC_INTR_STAT__R_GEN_CALL_MASK 0x00000800L +#define CKSVII2C_IC_INTR_STAT__R_RESTART_DET_MASK 0x00001000L +#define CKSVII2C_IC_INTR_STAT__R_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C_IC_INTR_MASK +#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER__SHIFT 0x0 +#define CKSVII2C_IC_INTR_MASK__M_RX_OVER__SHIFT 0x1 +#define CKSVII2C_IC_INTR_MASK__M_RX_FULL__SHIFT 0x2 +#define CKSVII2C_IC_INTR_MASK__M_TX_OVER__SHIFT 0x3 +#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C_IC_INTR_MASK__M_RD_REQ__SHIFT 0x5 +#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT__SHIFT 0x6 +#define CKSVII2C_IC_INTR_MASK__M_RX_DONE__SHIFT 0x7 +#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY__SHIFT 0x8 +#define CKSVII2C_IC_INTR_MASK__M_STOP_DET__SHIFT 0x9 +#define CKSVII2C_IC_INTR_MASK__M_START_DET__SHIFT 0xa +#define CKSVII2C_IC_INTR_MASK__M_GEN_CALL__SHIFT 0xb +#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET__SHIFT 0xc +#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C_IC_INTR_MASK__M_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C_IC_INTR_MASK__M_RX_UNDER_MASK 0x00000001L +#define CKSVII2C_IC_INTR_MASK__M_RX_OVER_MASK 0x00000002L +#define CKSVII2C_IC_INTR_MASK__M_RX_FULL_MASK 0x00000004L +#define CKSVII2C_IC_INTR_MASK__M_TX_OVER_MASK 0x00000008L +#define CKSVII2C_IC_INTR_MASK__M_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C_IC_INTR_MASK__M_RD_REQ_MASK 0x00000020L +#define CKSVII2C_IC_INTR_MASK__M_TX_ABRT_MASK 0x00000040L +#define CKSVII2C_IC_INTR_MASK__M_RX_DONE_MASK 0x00000080L +#define CKSVII2C_IC_INTR_MASK__M_ACTIVITY_MASK 0x00000100L +#define CKSVII2C_IC_INTR_MASK__M_STOP_DET_MASK 0x00000200L +#define CKSVII2C_IC_INTR_MASK__M_START_DET_MASK 0x00000400L +#define 
CKSVII2C_IC_INTR_MASK__M_GEN_CALL_MASK 0x00000800L +#define CKSVII2C_IC_INTR_MASK__M_RESTART_DET_MASK 0x00001000L +#define CKSVII2C_IC_INTR_MASK__M_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C_IC_RAW_INTR_STAT +//CKSVII2C_IC_RX_TL +#define CKSVII2C_IC_RX_TL__RX_TL__SHIFT 0x0 +//CKSVII2C_IC_TX_TL +#define CKSVII2C_IC_TX_TL__TX_TL__SHIFT 0x0 +//CKSVII2C_IC_CLR_INTR +//CKSVII2C_IC_CLR_RX_UNDER +//CKSVII2C_IC_CLR_RX_OVER +//CKSVII2C_IC_CLR_TX_OVER +//CKSVII2C_IC_CLR_RD_REQ +//CKSVII2C_IC_CLR_TX_ABRT +//CKSVII2C_IC_CLR_RX_DONE +//CKSVII2C_IC_CLR_ACTIVITY +//CKSVII2C_IC_CLR_STOP_DET +//CKSVII2C_IC_CLR_START_DET +//CKSVII2C_IC_CLR_GEN_CALL +//CKSVII2C_IC_ENABLE +#define CKSVII2C_IC_ENABLE__ENABLE__SHIFT 0x0 +#define CKSVII2C_IC_ENABLE__ABORT__SHIFT 0x1 +#define CKSVII2C_IC_ENABLE__TX_CMD_BLOCK__SHIFT 0x2 +#define CKSVII2C_IC_ENABLE__SDA_STUCK_RECOVERY_ENABLE__SHIFT 0x3 +#define CKSVII2C_IC_ENABLE__ENABLE_MASK 0x00000001L +#define CKSVII2C_IC_ENABLE__ABORT_MASK 0x00000002L +//CKSVII2C_IC_STATUS +#define CKSVII2C_IC_STATUS__ACTIVITY__SHIFT 0x0 +#define CKSVII2C_IC_STATUS__TFNF__SHIFT 0x1 +#define CKSVII2C_IC_STATUS__TFE__SHIFT 0x2 +#define CKSVII2C_IC_STATUS__RFNE__SHIFT 0x3 +#define CKSVII2C_IC_STATUS__RFF__SHIFT 0x4 +#define CKSVII2C_IC_STATUS__MST_ACTIVITY__SHIFT 0x5 +#define CKSVII2C_IC_STATUS__SLV_ACTIVITY__SHIFT 0x6 +#define CKSVII2C_IC_STATUS__MST_HOLD_TX_FIFO_EMPTY__SHIFT 0x7 +#define CKSVII2C_IC_STATUS__MST_HOLD_RX_FIFO_FULL__SHIFT 0x8 +#define CKSVII2C_IC_STATUS__SLV_HOLD_TX_FIFO_EMPTY__SHIFT 0x9 +#define CKSVII2C_IC_STATUS__SLV_HOLD_RX_FIFO_FULL__SHIFT 0xa +#define CKSVII2C_IC_STATUS__SDA_STUCK_NOT_RECOVERED__SHIFT 0xb +#define CKSVII2C_IC_STATUS__ACTIVITY_MASK 0x00000001L +#define CKSVII2C_IC_STATUS__TFNF_MASK 0x00000002L +#define CKSVII2C_IC_STATUS__TFE_MASK 0x00000004L +#define CKSVII2C_IC_STATUS__RFNE_MASK 0x00000008L +#define CKSVII2C_IC_STATUS__RFF_MASK 0x00000010L +#define CKSVII2C_IC_STATUS__MST_ACTIVITY_MASK 0x00000020L +#define CKSVII2C_IC_STATUS__SLV_ACTIVITY_MASK 0x00000040L +//CKSVII2C_IC_TXFLR +#define CKSVII2C_IC_TXFLR__TXFLR__SHIFT 0x0 +//CKSVII2C_IC_RXFLR +#define CKSVII2C_IC_RXFLR__RXFLR__SHIFT 0x0 +//CKSVII2C_IC_SDA_HOLD +#define CKSVII2C_IC_SDA_HOLD__IC_SDA_TX_HOLD__SHIFT 0x0 +#define CKSVII2C_IC_SDA_HOLD__IC_SDA_RX_HOLD__SHIFT 0x10 +//CKSVII2C_IC_TX_ABRT_SOURCE +//CKSVII2C_IC_SLV_DATA_NACK_ONLY +//CKSVII2C_IC_DMA_CR +//CKSVII2C_IC_DMA_TDLR +//CKSVII2C_IC_DMA_RDLR +//CKSVII2C_IC_SDA_SETUP +#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP__SHIFT 0x0 +#define CKSVII2C_IC_SDA_SETUP__SDA_SETUP_MASK 0x000000FFL +//CKSVII2C_IC_ACK_GENERAL_CALL +#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL__SHIFT 0x0 +#define CKSVII2C_IC_ACK_GENERAL_CALL__ACK_GENERAL_CALL_MASK 0x00000001L +//CKSVII2C_IC_ENABLE_STATUS +#define CKSVII2C_IC_ENABLE_STATUS__IC_EN__SHIFT 0x0 +#define CKSVII2C_IC_ENABLE_STATUS__SLV_DISABLED_WHILE_BUSY__SHIFT 0x1 +#define CKSVII2C_IC_ENABLE_STATUS__SLV_RX_DATA_LOST__SHIFT 0x2 +#define CKSVII2C_IC_ENABLE_STATUS__IC_EN_MASK 0x00000001L +//CKSVII2C_IC_FS_SPKLEN +#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN__SHIFT 0x0 +#define CKSVII2C_IC_FS_SPKLEN__FS_SPKLEN_MASK 0x000000FFL +//CKSVII2C_IC_HS_SPKLEN +#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN__SHIFT 0x0 +#define CKSVII2C_IC_HS_SPKLEN__HS_SPKLEN_MASK 0x000000FFL +//CKSVII2C_IC_CLR_RESTART_DET +//CKSVII2C_IC_COMP_PARAM_1 +#define CKSVII2C_IC_COMP_PARAM_1__APB_DATA_WIDTH__SHIFT 0x0 +#define CKSVII2C_IC_COMP_PARAM_1__MAX_SPEED_MODE__SHIFT 0x2 +#define CKSVII2C_IC_COMP_PARAM_1__HC_COUNT_VALUES__SHIFT 0x4 +#define 
CKSVII2C_IC_COMP_PARAM_1__INTR_IO__SHIFT 0x5 +#define CKSVII2C_IC_COMP_PARAM_1__HAS_DMA__SHIFT 0x6 +#define CKSVII2C_IC_COMP_PARAM_1__ADD_ENCODED_PARAMS__SHIFT 0x7 +#define CKSVII2C_IC_COMP_PARAM_1__RX_BUFFER_DEPTH__SHIFT 0x8 +#define CKSVII2C_IC_COMP_PARAM_1__TX_BUFFER_DEPTH__SHIFT 0x10 +//CKSVII2C_IC_COMP_VERSION +#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION__SHIFT 0x0 +#define CKSVII2C_IC_COMP_VERSION__COMP_VERSION_MASK 0xFFFFFFFFL +//CKSVII2C_IC_COMP_TYPE +#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0 +#define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL +//CKSVII2C1_IC_CON +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0 +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4 +#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5 +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6 +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7 +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8 +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9 +#define CKSVII2C1_IC_CON__BUS_CLEAR_FEATURE_CTRL1__SHIFT 0xb +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L +#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L +//CKSVII2C1_IC_TAR +#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0 +#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa +#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc +#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL +#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L +#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L +//CKSVII2C1_IC_SAR +#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0 +#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL +//CKSVII2C1_IC_HS_MADDR +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0 +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L +//CKSVII2C1_IC_DATA_CMD +#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0 +#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8 +#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9 +#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa +#define CKSVII2C1_IC_DATA_CMD__FIRST1_DATA_BYTE__SHIFT 0xb +#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL +#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L +#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L +#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L +//CKSVII2C1_IC_SS_SCL_HCNT +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_SS_SCL_LCNT +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_HCNT +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_LCNT +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0 +#define 
CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_HCNT +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_LCNT +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_INTR_STAT +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_STAT__R1_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_INTR_MASK +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_MASK__M1_SCL_STUCK_AT_LOW__SHIFT 0xe +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L +#define 
CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_RAW_INTR_STAT +//CKSVII2C1_IC_RX_TL +#define CKSVII2C1_IC_RX_TL__RX1_TL__SHIFT 0x0 +//CKSVII2C1_IC_TX_TL +#define CKSVII2C1_IC_TX_TL__TX1_TL__SHIFT 0x0 +//CKSVII2C1_IC_CLR_INTR +//CKSVII2C1_IC_CLR_RX_UNDER +//CKSVII2C1_IC_CLR_RX_OVER +//CKSVII2C1_IC_CLR_TX_OVER +//CKSVII2C1_IC_CLR_RD_REQ +//CKSVII2C1_IC_CLR_TX_ABRT +//CKSVII2C1_IC_CLR_RX_DONE +//CKSVII2C1_IC_CLR_ACTIVITY +//CKSVII2C1_IC_CLR_STOP_DET +//CKSVII2C1_IC_CLR_START_DET +//CKSVII2C1_IC_CLR_GEN_CALL +//CKSVII2C1_IC_ENABLE +#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE__TX1_CMD_BLOCK__SHIFT 0x2 +#define CKSVII2C1_IC_ENABLE__SDA1_STUCK_RECOVERY_ENABLE__SHIFT 0x3 +#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L +//CKSVII2C1_IC_STATUS +#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0 +#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1 +#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2 +#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3 +#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4 +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5 +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6 +#define CKSVII2C1_IC_STATUS__MST1_HOLD_TX_FIFO_EMPTY__SHIFT 0x7 +#define CKSVII2C1_IC_STATUS__MST1_HOLD_RX_FIFO_FULL__SHIFT 0x8 +#define CKSVII2C1_IC_STATUS__SLV1_HOLD_TX_FIFO_EMPTY__SHIFT 0x9 +#define CKSVII2C1_IC_STATUS__SLV1_HOLD_RX_FIFO_FULL__SHIFT 0xa +#define CKSVII2C1_IC_STATUS__SDA1_STUCK_NOT_RECOVERED__SHIFT 0xb +#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L +#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L +#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L +#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L +#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L +//CKSVII2C1_IC_TXFLR +#define CKSVII2C1_IC_TXFLR__TXFLR1__SHIFT 0x0 +//CKSVII2C1_IC_RXFLR +#define CKSVII2C1_IC_RXFLR__RXFLR1__SHIFT 0x0 +//CKSVII2C1_IC_SDA_HOLD +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_TX_HOLD__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_RX_HOLD__SHIFT 0x10 +//CKSVII2C1_IC_TX_ABRT_SOURCE +//CKSVII2C1_IC_SLV_DATA_NACK_ONLY +//CKSVII2C1_IC_DMA_CR +//CKSVII2C1_IC_DMA_TDLR +//CKSVII2C1_IC_DMA_RDLR +//CKSVII2C1_IC_SDA_SETUP +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL +//CKSVII2C1_IC_ACK_GENERAL_CALL +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0 +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L +//CKSVII2C1_IC_ENABLE_STATUS +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_DISABLED_WHILE_BUSY__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_DATA_LOST__SHIFT 0x2 +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L +//CKSVII2C1_IC_FS_SPKLEN +#define CKSVII2C1_IC_FS_SPKLEN__FS1_SPKLEN__SHIFT 0x0 +//CKSVII2C1_IC_HS_SPKLEN +#define CKSVII2C1_IC_HS_SPKLEN__HS1_SPKLEN__SHIFT 0x0 +//CKSVII2C1_IC_CLR_RESTART_DET +//CKSVII2C1_IC_COMP_PARAM_1 +#define CKSVII2C1_IC_COMP_PARAM_1__APB1_DATA_WIDTH__SHIFT 0x0 +#define 
CKSVII2C1_IC_COMP_PARAM_1__MAX1_SPEED_MODE__SHIFT 0x2 +#define CKSVII2C1_IC_COMP_PARAM_1__HC1_COUNT_VALUES__SHIFT 0x4 +#define CKSVII2C1_IC_COMP_PARAM_1__INTR1_IO__SHIFT 0x5 +#define CKSVII2C1_IC_COMP_PARAM_1__HAS1_DMA__SHIFT 0x6 +#define CKSVII2C1_IC_COMP_PARAM_1__ADD1_ENCODED_PARAMS__SHIFT 0x7 +#define CKSVII2C1_IC_COMP_PARAM_1__RX1_BUFFER_DEPTH__SHIFT 0x8 +#define CKSVII2C1_IC_COMP_PARAM_1__TX1_BUFFER_DEPTH__SHIFT 0x10 +//CKSVII2C1_IC_COMP_VERSION +#define CKSVII2C1_IC_COMP_VERSION__COMP1_VERSION__SHIFT 0x0 +//CKSVII2C1_IC_COMP_TYPE +#define CKSVII2C1_IC_COMP_TYPE__COMP1_TYPE__SHIFT 0x0 +//SMUIO_PWRMGT +#define SMUIO_PWRMGT__i2c_clk_gate_en__SHIFT 0x0 +#define SMUIO_PWRMGT__i2c1_clk_gate_en__SHIFT 0x4 +#define SMUIO_PWRMGT__i2c_clk_gate_en_MASK 0x00000001L +#define SMUIO_PWRMGT__i2c1_clk_gate_en_MASK 0x00000010L + + +// addressBlock: smuio_smuio_rom_SmuSmuioDec +//ROM_CNTL +#define ROM_CNTL__CLOCK_GATING_EN__SHIFT 0x0 +#define ROM_CNTL__READ_MODE__SHIFT 0x1 +#define ROM_CNTL__READ_MODE_OVERRIDE__SHIFT 0x3 +#define ROM_CNTL__SPI_TIMING_RELAX_SCK__SHIFT 0x4 +#define ROM_CNTL__SPI_TIMING_RELAX_SCK_OVERRIDE__SHIFT 0x5 +#define ROM_CNTL__FOUR_BYTE_ADDRESS_MODE__SHIFT 0x6 +#define ROM_CNTL__DUMMY_CYCLE_NUM__SHIFT 0x8 +#define ROM_CNTL__SPI_TIMING_RELAX__SHIFT 0x13 +#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE__SHIFT 0x14 +#define ROM_CNTL__SPI_FAST_MODE__SHIFT 0x15 +#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE__SHIFT 0x16 +#define ROM_CNTL__SCK_PRESCALE_REFCLK__SHIFT 0x17 +#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE__SHIFT 0x1c +#define ROM_CNTL__ROM_INDEX_ADDRESS_AUTO_INCREASE__SHIFT 0x1d +#define ROM_CNTL__PAD_SAMPLE_MODE__SHIFT 0x1e +#define ROM_CNTL__PAD_SAMPLE_MODE_OVERRIDE__SHIFT 0x1f +#define ROM_CNTL__CLOCK_GATING_EN_MASK 0x00000001L +#define ROM_CNTL__SPI_TIMING_RELAX_MASK 0x00080000L +#define ROM_CNTL__SPI_TIMING_RELAX_OVERRIDE_MASK 0x00100000L +#define ROM_CNTL__SPI_FAST_MODE_MASK 0x00200000L +#define ROM_CNTL__SPI_FAST_MODE_OVERRIDE_MASK 0x00400000L +#define ROM_CNTL__SCK_PRESCALE_REFCLK_MASK 0x0F800000L +#define ROM_CNTL__SCK_PRESCALE_REFCLK_OVERRIDE_MASK 0x10000000L +//PAGE_MIRROR_CNTL +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR__SHIFT 0x0 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE__SHIFT 0x19 +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE__SHIFT 0x1a +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE__SHIFT 0x1c +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_BASE_ADDR_MASK 0x01FFFFFFL +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_ENABLE_MASK 0x02000000L +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_USAGE_MASK 0x0C000000L +#define PAGE_MIRROR_CNTL__PAGE_MIRROR_INVALIDATE_MASK 0x10000000L +//ROM_STATUS +#define ROM_STATUS__ROM_BUSY__SHIFT 0x0 +#define ROM_STATUS__ROM_BUSY_MASK 0x00000001L +//CGTT_ROM_CLK_CTRL0 +#define CGTT_ROM_CLK_CTRL0__ON_DELAY__SHIFT 0x0 +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS__SHIFT 0x4 +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1__SHIFT 0x1e +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0__SHIFT 0x1f +#define CGTT_ROM_CLK_CTRL0__ON_DELAY_MASK 0x0000000FL +#define CGTT_ROM_CLK_CTRL0__OFF_HYSTERESIS_MASK 0x00000FF0L +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK 0x40000000L +#define CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK 0x80000000L +//ROM_INDEX +#define ROM_INDEX__ROM_INDEX__SHIFT 0x0 +#define ROM_INDEX__ROM_INDEX_MASK 0x01FFFFFFL +//ROM_DATA +#define ROM_DATA__ROM_DATA__SHIFT 0x0 +#define ROM_DATA__ROM_DATA_MASK 0xFFFFFFFFL +//ROM_START +#define ROM_START__ROM_START__SHIFT 0x0 +#define ROM_START__ROM_START_MASK 0x01FFFFFFL +//ROM_SW_CNTL +#define 
ROM_SW_CNTL__DATA_SIZE__SHIFT 0x0 +#define ROM_SW_CNTL__COMMAND_SIZE__SHIFT 0x10 +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE__SHIFT 0x13 +#define ROM_SW_CNTL__DATA_SIZE_MASK 0x0000FFFFL +#define ROM_SW_CNTL__COMMAND_SIZE_MASK 0x00070000L +#define ROM_SW_CNTL__ROM_SW_RETURN_DATA_ENABLE_MASK 0x00080000L +//ROM_SW_STATUS +#define ROM_SW_STATUS__ROM_SW_DONE__SHIFT 0x0 +#define ROM_SW_STATUS__ROM_SW_DONE_MASK 0x00000001L +//ROM_SW_COMMAND +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION__SHIFT 0x0 +#define ROM_SW_COMMAND__ROM_SW_ADDRESS__SHIFT 0x8 +#define ROM_SW_COMMAND__ROM_SW_INSTRUCTION_MASK 0x000000FFL +#define ROM_SW_COMMAND__ROM_SW_ADDRESS_MASK 0xFFFFFF00L +//ROM_SW_DATA_1 +#define ROM_SW_DATA_1__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_1__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_2 +#define ROM_SW_DATA_2__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_2__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_3 +#define ROM_SW_DATA_3__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_3__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_4 +#define ROM_SW_DATA_4__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_4__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_5 +#define ROM_SW_DATA_5__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_5__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_6 +#define ROM_SW_DATA_6__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_6__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_7 +#define ROM_SW_DATA_7__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_7__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_8 +#define ROM_SW_DATA_8__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_8__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_9 +#define ROM_SW_DATA_9__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_9__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_10 +#define ROM_SW_DATA_10__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_10__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_11 +#define ROM_SW_DATA_11__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_11__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_12 +#define ROM_SW_DATA_12__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_12__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_13 +#define ROM_SW_DATA_13__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_13__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_14 +#define ROM_SW_DATA_14__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_14__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_15 +#define ROM_SW_DATA_15__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_15__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_16 +#define ROM_SW_DATA_16__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_16__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_17 +#define ROM_SW_DATA_17__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_17__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_18 +#define ROM_SW_DATA_18__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_18__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_19 +#define ROM_SW_DATA_19__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_19__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_20 +#define ROM_SW_DATA_20__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_20__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_21 +#define ROM_SW_DATA_21__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_21__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_22 +#define ROM_SW_DATA_22__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_22__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_23 +#define ROM_SW_DATA_23__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_23__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_24 +#define ROM_SW_DATA_24__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_24__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_25 +#define 
ROM_SW_DATA_25__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_25__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_26 +#define ROM_SW_DATA_26__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_26__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_27 +#define ROM_SW_DATA_27__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_27__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_28 +#define ROM_SW_DATA_28__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_28__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_29 +#define ROM_SW_DATA_29__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_29__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_30 +#define ROM_SW_DATA_30__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_30__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_31 +#define ROM_SW_DATA_31__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_31__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_32 +#define ROM_SW_DATA_32__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_32__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_33 +#define ROM_SW_DATA_33__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_33__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_34 +#define ROM_SW_DATA_34__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_34__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_35 +#define ROM_SW_DATA_35__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_35__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_36 +#define ROM_SW_DATA_36__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_36__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_37 +#define ROM_SW_DATA_37__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_37__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_38 +#define ROM_SW_DATA_38__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_38__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_39 +#define ROM_SW_DATA_39__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_39__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_40 +#define ROM_SW_DATA_40__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_40__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_41 +#define ROM_SW_DATA_41__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_41__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_42 +#define ROM_SW_DATA_42__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_42__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_43 +#define ROM_SW_DATA_43__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_43__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_44 +#define ROM_SW_DATA_44__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_44__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_45 +#define ROM_SW_DATA_45__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_45__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_46 +#define ROM_SW_DATA_46__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_46__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_47 +#define ROM_SW_DATA_47__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_47__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_48 +#define ROM_SW_DATA_48__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_48__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_49 +#define ROM_SW_DATA_49__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_49__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_50 +#define ROM_SW_DATA_50__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_50__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_51 +#define ROM_SW_DATA_51__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_51__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_52 +#define ROM_SW_DATA_52__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_52__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_53 +#define ROM_SW_DATA_53__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_53__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_54 +#define ROM_SW_DATA_54__ROM_SW_DATA__SHIFT 0x0 +#define 
ROM_SW_DATA_54__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_55 +#define ROM_SW_DATA_55__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_55__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_56 +#define ROM_SW_DATA_56__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_56__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_57 +#define ROM_SW_DATA_57__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_57__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_58 +#define ROM_SW_DATA_58__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_58__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_59 +#define ROM_SW_DATA_59__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_59__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_60 +#define ROM_SW_DATA_60__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_60__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_61 +#define ROM_SW_DATA_61__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_61__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_62 +#define ROM_SW_DATA_62__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_62__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_63 +#define ROM_SW_DATA_63__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_63__ROM_SW_DATA_MASK 0xFFFFFFFFL +//ROM_SW_DATA_64 +#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0 +#define ROM_SW_DATA_64__ROM_SW_DATA_MASK 0xFFFFFFFFL + + +// addressBlock: smuio_smuio_gpio_SmuSmuioDec +//SMU_GPIOPAD_SW_INT_STAT +#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_SW_INT_STAT__SW_INT_STAT_MASK 0x00000001L +//SMU_GPIOPAD_MASK +#define SMU_GPIOPAD_MASK__GPIO_MASK__SHIFT 0x0 +#define SMU_GPIOPAD_MASK__GPIO_MASK_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_A +#define SMU_GPIOPAD_A__GPIO_A__SHIFT 0x0 +#define SMU_GPIOPAD_A__GPIO_A_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_TXIMPSEL +#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL__SHIFT 0x0 +#define SMU_GPIOPAD_TXIMPSEL__GPIO_TXIMPSEL_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_EN +#define SMU_GPIOPAD_EN__GPIO_EN__SHIFT 0x0 +#define SMU_GPIOPAD_EN__GPIO_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_Y +#define SMU_GPIOPAD_Y__GPIO_Y__SHIFT 0x0 +#define SMU_GPIOPAD_Y__GPIO_Y_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RXEN +#define SMU_GPIOPAD_RXEN__GPIO_RXEN__SHIFT 0x0 +#define SMU_GPIOPAD_RXEN__GPIO_RXEN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RCVR_SEL0 +#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0__SHIFT 0x0 +#define SMU_GPIOPAD_RCVR_SEL0__GPIO_RCVR_SEL0_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_RCVR_SEL1 +#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1__SHIFT 0x0 +#define SMU_GPIOPAD_RCVR_SEL1__GPIO_RCVR_SEL1_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PU_EN +#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN__SHIFT 0x0 +#define SMU_GPIOPAD_PU_EN__GPIO_PU_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PD_EN +#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN__SHIFT 0x0 +#define SMU_GPIOPAD_PD_EN__GPIO_PD_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_PINSTRAPS +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0__SHIFT 0x0 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1__SHIFT 0x1 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2__SHIFT 0x2 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3__SHIFT 0x3 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4__SHIFT 0x4 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5__SHIFT 0x5 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6__SHIFT 0x6 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7__SHIFT 0x7 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8__SHIFT 0x8 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9__SHIFT 0x9 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10__SHIFT 0xa +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11__SHIFT 0xb +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12__SHIFT 0xc +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13__SHIFT 
0xd +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14__SHIFT 0xe +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15__SHIFT 0xf +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16__SHIFT 0x10 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17__SHIFT 0x11 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18__SHIFT 0x12 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19__SHIFT 0x13 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20__SHIFT 0x14 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21__SHIFT 0x15 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22__SHIFT 0x16 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23__SHIFT 0x17 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24__SHIFT 0x18 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25__SHIFT 0x19 +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26__SHIFT 0x1a +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27__SHIFT 0x1b +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28__SHIFT 0x1c +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29__SHIFT 0x1d +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30__SHIFT 0x1e +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_0_MASK 0x00000001L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_1_MASK 0x00000002L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_2_MASK 0x00000004L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_3_MASK 0x00000008L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_4_MASK 0x00000010L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_5_MASK 0x00000020L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_6_MASK 0x00000040L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_7_MASK 0x00000080L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_8_MASK 0x00000100L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_9_MASK 0x00000200L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_10_MASK 0x00000400L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_11_MASK 0x00000800L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_12_MASK 0x00001000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_13_MASK 0x00002000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_14_MASK 0x00004000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_15_MASK 0x00008000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_16_MASK 0x00010000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_17_MASK 0x00020000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_18_MASK 0x00040000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_19_MASK 0x00080000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_20_MASK 0x00100000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_21_MASK 0x00200000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_22_MASK 0x00400000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_23_MASK 0x00800000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_24_MASK 0x01000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_25_MASK 0x02000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_26_MASK 0x04000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_27_MASK 0x08000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_28_MASK 0x10000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_29_MASK 0x20000000L +#define SMU_GPIOPAD_PINSTRAPS__GPIO_PINSTRAP_30_MASK 0x40000000L +//DFT_PINSTRAPS +#define DFT_PINSTRAPS__DFT_PINSTRAPS__SHIFT 0x0 +#define DFT_PINSTRAPS__DFT_PINSTRAPS_MASK 0x000000FFL +//SMU_GPIOPAD_INT_STAT_EN +#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT_EN__GPIO_INT_STAT_EN_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_STAT_EN__SW_INITIATED_INT_STAT_EN_MASK 0x80000000L +//SMU_GPIOPAD_INT_STAT +#define 
SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT__GPIO_INT_STAT_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_STAT__SW_INITIATED_INT_STAT_MASK 0x80000000L +//SMU_GPIOPAD_INT_STAT_AK +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0__SHIFT 0x0 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1__SHIFT 0x1 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2__SHIFT 0x2 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3__SHIFT 0x3 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4__SHIFT 0x4 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5__SHIFT 0x5 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6__SHIFT 0x6 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7__SHIFT 0x7 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8__SHIFT 0x8 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9__SHIFT 0x9 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10__SHIFT 0xa +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11__SHIFT 0xb +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12__SHIFT 0xc +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13__SHIFT 0xd +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14__SHIFT 0xe +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15__SHIFT 0xf +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16__SHIFT 0x10 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17__SHIFT 0x11 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18__SHIFT 0x12 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19__SHIFT 0x13 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20__SHIFT 0x14 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21__SHIFT 0x15 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22__SHIFT 0x16 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23__SHIFT 0x17 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24__SHIFT 0x18 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25__SHIFT 0x19 +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26__SHIFT 0x1a +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27__SHIFT 0x1b +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28__SHIFT 0x1c +#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK__SHIFT 0x1f +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_0_MASK 0x00000001L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_1_MASK 0x00000002L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_2_MASK 0x00000004L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_3_MASK 0x00000008L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_4_MASK 0x00000010L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_5_MASK 0x00000020L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_6_MASK 0x00000040L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_7_MASK 0x00000080L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_8_MASK 0x00000100L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_9_MASK 0x00000200L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_10_MASK 0x00000400L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_11_MASK 0x00000800L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_12_MASK 0x00001000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_13_MASK 0x00002000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_14_MASK 0x00004000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_15_MASK 0x00008000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_16_MASK 0x00010000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_17_MASK 0x00020000L +#define 
SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_18_MASK 0x00040000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_19_MASK 0x00080000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_20_MASK 0x00100000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_21_MASK 0x00200000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_22_MASK 0x00400000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_23_MASK 0x00800000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_24_MASK 0x01000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_25_MASK 0x02000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_26_MASK 0x04000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_27_MASK 0x08000000L +#define SMU_GPIOPAD_INT_STAT_AK__GPIO_INT_STAT_AK_28_MASK 0x10000000L +#define SMU_GPIOPAD_INT_STAT_AK__SW_INITIATED_INT_STAT_AK_MASK 0x80000000L +//SMU_GPIOPAD_INT_EN +#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN__SHIFT 0x0 +#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN__SHIFT 0x1f +#define SMU_GPIOPAD_INT_EN__GPIO_INT_EN_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_EN__SW_INITIATED_INT_EN_MASK 0x80000000L +//SMU_GPIOPAD_INT_TYPE +#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE__SHIFT 0x0 +#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE__SHIFT 0x1f +#define SMU_GPIOPAD_INT_TYPE__GPIO_INT_TYPE_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_TYPE__SW_INITIATED_INT_TYPE_MASK 0x80000000L +//SMU_GPIOPAD_INT_POLARITY +#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY__SHIFT 0x0 +#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY__SHIFT 0x1f +#define SMU_GPIOPAD_INT_POLARITY__GPIO_INT_POLARITY_MASK 0x1FFFFFFFL +#define SMU_GPIOPAD_INT_POLARITY__SW_INITIATED_INT_POLARITY_MASK 0x80000000L +//SMUIO_PCC_GPIO_SELECT +#define SMUIO_PCC_GPIO_SELECT__GPIO__SHIFT 0x0 +#define SMUIO_PCC_GPIO_SELECT__GPIO_MASK 0xFFFFFFFFL +//SMU_GPIOPAD_S0 +#define SMU_GPIOPAD_S0__GPIO_S0__SHIFT 0x0 +#define SMU_GPIOPAD_S0__GPIO_S0_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_S1 +#define SMU_GPIOPAD_S1__GPIO_S1__SHIFT 0x0 +#define SMU_GPIOPAD_S1__GPIO_S1_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SCHMEN +#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN__SHIFT 0x0 +#define SMU_GPIOPAD_SCHMEN__GPIO_SCHMEN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SCL_EN +#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN__SHIFT 0x0 +#define SMU_GPIOPAD_SCL_EN__GPIO_SCL_EN_MASK 0x7FFFFFFFL +//SMU_GPIOPAD_SDA_EN +#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN__SHIFT 0x0 +#define SMU_GPIOPAD_SDA_EN__GPIO_SDA_EN_MASK 0x7FFFFFFFL +//SMUIO_GPIO_INT0_SELECT +#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT0_SELECT__GPIO_INT0_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT1_SELECT +#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT1_SELECT__GPIO_INT1_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT2_SELECT +#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT2_SELECT__GPIO_INT2_SELECT_MASK 0xFFFFFFFFL +//SMUIO_GPIO_INT3_SELECT +#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT__SHIFT 0x0 +#define SMUIO_GPIO_INT3_SELECT__GPIO_INT3_SELECT_MASK 0xFFFFFFFFL +//SMU_GPIOPAD_MP_INT0_STAT +#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT0_STAT__GPIO_MP_INT0_STAT_MASK 0x1FFFFFFFL +//SMU_GPIOPAD_MP_INT1_STAT +#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT1_STAT__GPIO_MP_INT1_STAT_MASK 0x1FFFFFFFL +//SMU_GPIOPAD_MP_INT2_STAT +#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT2_STAT__GPIO_MP_INT2_STAT_MASK 0x1FFFFFFFL 
+//SMU_GPIOPAD_MP_INT3_STAT +#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT__SHIFT 0x0 +#define SMU_GPIOPAD_MP_INT3_STAT__GPIO_MP_INT3_STAT_MASK 0x1FFFFFFFL +//SMIO_INDEX +#define SMIO_INDEX__SW_SMIO_INDEX__SHIFT 0x0 +#define SMIO_INDEX__SW_SMIO_INDEX_MASK 0x00000001L +//S0_VID_SMIO_CNTL +#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES__SHIFT 0x0 +#define S0_VID_SMIO_CNTL__S0_SMIO_VALUES_MASK 0xFFFFFFFFL +//S1_VID_SMIO_CNTL +#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES__SHIFT 0x0 +#define S1_VID_SMIO_CNTL__S1_SMIO_VALUES_MASK 0xFFFFFFFFL +//OPEN_DRAIN_SELECT +#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT__SHIFT 0x0 +#define OPEN_DRAIN_SELECT__RESERVED__SHIFT 0x1f +#define OPEN_DRAIN_SELECT__OPEN_DRAIN_SELECT_MASK 0x7FFFFFFFL +#define OPEN_DRAIN_SELECT__RESERVED_MASK 0x80000000L +//SMIO_ENABLE +#define SMIO_ENABLE__SMIO_ENABLE__SHIFT 0x0 +#define SMIO_ENABLE__SMIO_ENABLE_MASK 0xFFFFFFFFL + +#endif -- cgit v1.2.3-70-g09d2 From 2d93151de8908b9f7db954297868fe02bc07041d Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 17 Mar 2024 18:34:03 +0800 Subject: drm/amdgpu: Add smuio v14_0_2 ip block support Add smuio v14_0_2 ip block support Signed-off-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 ++- drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c | 41 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h | 30 ++++++++++++++++++++++ 3 files changed, 73 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c create mode 100644 drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index f24f11ac3e92..535e3936cfe0 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -248,7 +248,8 @@ amdgpu-y += \ smuio_v11_0_6.o \ smuio_v13_0.o \ smuio_v13_0_3.o \ - smuio_v13_0_6.o + smuio_v13_0_6.o \ + smuio_v14_0_2.o # add reset block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c new file mode 100644 index 000000000000..7b7cca220b26 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c @@ -0,0 +1,41 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "smuio_v14_0_2.h" +#include "smuio/smuio_14_0_2_offset.h" +#include "smuio/smuio_14_0_2_sh_mask.h" + +static u32 smuio_v14_0_2_get_rom_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX); +} + +static u32 smuio_v14_0_2_get_rom_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA); +} + +const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs = { + .get_rom_index_offset = smuio_v14_0_2_get_rom_index_offset, + .get_rom_data_offset = smuio_v14_0_2_get_rom_data_offset, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h new file mode 100644 index 000000000000..6e617f832d90 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMUIO_V14_0_2_H__ +#define __SMUIO_V14_0_2_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs; + +#endif /* __SMUIO_V14_0_2_H__ */ -- cgit v1.2.3-70-g09d2 From d80e44a34e25a33a15c510a74575377f66a24b1e Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 17 Mar 2024 19:07:11 +0800 Subject: drm/amdgpu: Add smuio callback to get gpu clk counter Add smuio callback to get gpu clk counter Signed-off-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h | 1 + drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h index ff4435181055..ec9d12f85f39 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h @@ -44,6 +44,7 @@ struct amdgpu_smuio_funcs { u32 (*get_socket_id)(struct amdgpu_device *adev); enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev); bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev); + u64 (*get_gpu_clock_counter)(struct amdgpu_device *adev); }; struct amdgpu_smuio { diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c index 7b7cca220b26..2a51a70d4846 100644 --- a/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c @@ -24,6 +24,7 @@ #include "smuio_v14_0_2.h" #include "smuio/smuio_14_0_2_offset.h" #include "smuio/smuio_14_0_2_sh_mask.h" +#include static u32 smuio_v14_0_2_get_rom_index_offset(struct amdgpu_device *adev) { @@ -35,7 +36,27 @@ static u32 smuio_v14_0_2_get_rom_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA); } +static u64 smuio_v14_0_2_get_gpu_clock_counter(struct amdgpu_device *adev) +{ + u64 clock; + u64 clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after; + + preempt_disable(); + clock_counter_hi_pre = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); + clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + /* the clock counter may be udpated during polling the counters */ + clock_counter_hi_after = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER); + if (clock_counter_hi_pre != clock_counter_hi_after) + clock_counter_lo = (u64)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER); + preempt_enable(); + + clock = clock_counter_lo | (clock_counter_hi_after << 32ULL); + + return clock; +} + const struct amdgpu_smuio_funcs smuio_v14_0_2_funcs = { .get_rom_index_offset = smuio_v14_0_2_get_rom_index_offset, .get_rom_data_offset = smuio_v14_0_2_get_rom_data_offset, + .get_gpu_clock_counter = smuio_v14_0_2_get_gpu_clock_counter, }; -- cgit v1.2.3-70-g09d2 From a61e2ce9d42513798df5ada7dd1397636903a005 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 17 Mar 2024 19:09:11 +0800 Subject: drm/amdgpu: Enable smuio v14_0_2 callbacks Enable smuio v14_0_2_callbacks Signed-off-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 8d9d6b2e68e8..230412fc4d62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -97,6 +97,7 @@ 
#include "smuio_v13_0.h" #include "smuio_v13_0_3.h" #include "smuio_v13_0_6.h" +#include "smuio_v14_0_2.h" #include "vcn_v5_0_0.h" #include "jpeg_v5_0_0.h" @@ -2684,6 +2685,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(14, 0, 1): adev->smuio.funcs = &smuio_v13_0_6_funcs; break; + case IP_VERSION(14, 0, 2): + adev->smuio.funcs = &smuio_v14_0_2_funcs; + break; default: break; } -- cgit v1.2.3-70-g09d2 From fb734632070099afde3a897154a567534d07149d Mon Sep 17 00:00:00 2001 From: lima1002 Date: Thu, 25 Jan 2024 15:53:16 +0800 Subject: drm/amd/swsmu: add smu 14.0.1 vcn and jpeg msg add new vcn and jpeg msg v2: squash in updates (Alex) v3: rework code for better compat with other smu14.x variants (Alex) Reviewed-by: Alex Deucher Signed-off-by: lima1002 Signed-off-by: Alex Deucher --- .../amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h | 28 ++++++------ drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 10 +++++ drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 50 +++++++++++++++++++--- .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 21 ++++++--- 4 files changed, 82 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h index 8a8a57c56bc0..ca7ce4251482 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h @@ -54,14 +54,14 @@ #define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team #define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version #define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version -#define PPSMC_MSG_SPARE0 0x04 ///< SPARE -#define PPSMC_MSG_SPARE1 0x05 ///< SPARE -#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN -#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default -#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display +#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1 +#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default +#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0 +#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default +#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_SPARE2 0x0A ///< SPARE -#define PPSMC_MSG_SPARE3 0x0B ///< SPARE +#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display +#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1) #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer #define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer @@ -71,7 +71,7 @@ #define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK -#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) +#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0) #define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU @@ -84,17 +84,17 @@ #define 
PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK #define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK -#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK) +#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0) #define PPSMC_MSG_spare_0x20 0x20 -#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg -#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up Jpeg; VCN is power gated by default +#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0 +#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default #define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK #define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity -#define PPSMC_MSG_Reserved 0x26 ///< Not used -#define PPSMC_MSG_Reserved1 0x27 ///< Not used, previously PPSMC_MSG_RequestActiveWgp -#define PPSMC_MSG_Reserved2 0x28 ///< Not used, previously PPSMC_MSG_QueryActiveWgp +#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1 +#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default +#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1) #define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default #define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM #define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index a941fdbf78b6..af427cc7dbb8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -115,6 +115,10 @@ __SMU_DUMMY_MAP(PowerDownVcn), \ __SMU_DUMMY_MAP(PowerUpJpeg), \ __SMU_DUMMY_MAP(PowerDownJpeg), \ + __SMU_DUMMY_MAP(PowerUpJpeg0), \ + __SMU_DUMMY_MAP(PowerDownJpeg0), \ + __SMU_DUMMY_MAP(PowerUpJpeg1), \ + __SMU_DUMMY_MAP(PowerDownJpeg1), \ __SMU_DUMMY_MAP(BacoAudioD3PME), \ __SMU_DUMMY_MAP(ArmD3), \ __SMU_DUMMY_MAP(RunDcBtc), \ @@ -135,6 +139,8 @@ __SMU_DUMMY_MAP(PowerUpSdma), \ __SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \ __SMU_DUMMY_MAP(SetHardMinVcn), \ + __SMU_DUMMY_MAP(SetHardMinVcn0), \ + __SMU_DUMMY_MAP(SetHardMinVcn1), \ __SMU_DUMMY_MAP(SetAllowFclkSwitch), \ __SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \ __SMU_DUMMY_MAP(ActiveProcessNotify), \ @@ -150,6 +156,8 @@ __SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \ __SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \ __SMU_DUMMY_MAP(SetSoftMinVcn), \ + __SMU_DUMMY_MAP(SetSoftMinVcn0), \ + __SMU_DUMMY_MAP(SetSoftMinVcn1), \ __SMU_DUMMY_MAP(EnablePostCode), \ __SMU_DUMMY_MAP(GetGfxclkFrequency), \ __SMU_DUMMY_MAP(GetFclkFrequency), \ @@ -161,6 +169,8 @@ __SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \ __SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \ __SMU_DUMMY_MAP(SetSoftMaxVcn), \ + __SMU_DUMMY_MAP(SetSoftMaxVcn0), \ + __SMU_DUMMY_MAP(SetSoftMaxVcn1), \ __SMU_DUMMY_MAP(PowerGateMmHub), \ __SMU_DUMMY_MAP(UpdatePmeRestore), \ __SMU_DUMMY_MAP(GpuChangeState), \ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index b06a3cc43305..9e39f99154f9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1402,9 +1402,22 @@ int smu_v14_0_set_vcn_enable(struct 
smu_context *smu, if (adev->vcn.harvest_config & (1 << i)) continue; - ret = smu_cmn_send_smc_msg_with_param(smu, enable ? - SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, - i << 16U, NULL); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + if (i == 0) + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0, + i << 16U, NULL); + else if (i == 1) + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1, + i << 16U, NULL); + } else { + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn, + i << 16U, NULL); + } + if (ret) return ret; } @@ -1415,9 +1428,34 @@ int smu_v14_0_set_vcn_enable(struct smu_context *smu, int smu_v14_0_set_jpeg_enable(struct smu_context *smu, bool enable) { - return smu_cmn_send_smc_msg_with_param(smu, enable ? - SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg, - 0, NULL); + struct amdgpu_device *adev = smu->adev; + int i, ret = 0; + + for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) { + if (adev->jpeg.harvest_config & (1 << i)) + continue; + + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) || + amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) { + if (i == 0) + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0, + i << 16U, NULL); + else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1, + i << 16U, NULL); + } else { + ret = smu_cmn_send_smc_msg_with_param(smu, enable ? + SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg, + i << 16U, NULL); + } + + if (ret) + return ret; + } + + return ret; } int smu_v14_0_run_btc(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 9310c4758e38..d6de6d97286c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -70,9 +70,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1), MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), - MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), - MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), - MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1), + MSG_MAP(PowerDownVcn0, PPSMC_MSG_PowerDownVcn0, 1), + MSG_MAP(PowerUpVcn0, PPSMC_MSG_PowerUpVcn0, 1), + MSG_MAP(SetHardMinVcn0, PPSMC_MSG_SetHardMinVcn0, 1), + MSG_MAP(PowerDownVcn1, PPSMC_MSG_PowerDownVcn1, 1), + MSG_MAP(PowerUpVcn1, PPSMC_MSG_PowerUpVcn1, 1), + MSG_MAP(SetHardMinVcn1, PPSMC_MSG_SetHardMinVcn1, 1), MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), @@ -83,7 +86,8 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1), MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 1), MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 1), - MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1), + MSG_MAP(SetSoftMinVcn0, PPSMC_MSG_SetSoftMinVcn0, 1), + MSG_MAP(SetSoftMinVcn1, PPSMC_MSG_SetSoftMinVcn1, 1), 
MSG_MAP(EnableGfxImu, PPSMC_MSG_EnableGfxImu, 1), MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1), MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1), @@ -91,9 +95,12 @@ static struct cmn2asic_msg_mapping smu_v14_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1), MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 1), MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 1), - MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1), - MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1), - MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1), + MSG_MAP(SetSoftMaxVcn0, PPSMC_MSG_SetSoftMaxVcn0, 1), + MSG_MAP(SetSoftMaxVcn1, PPSMC_MSG_SetSoftMaxVcn1, 1), + MSG_MAP(PowerDownJpeg0, PPSMC_MSG_PowerDownJpeg0, 1), + MSG_MAP(PowerUpJpeg0, PPSMC_MSG_PowerUpJpeg0, 1), + MSG_MAP(PowerDownJpeg1, PPSMC_MSG_PowerDownJpeg1, 1), + MSG_MAP(PowerUpJpeg1, PPSMC_MSG_PowerUpJpeg1, 1), MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 1), MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 1), MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 1), -- cgit v1.2.3-70-g09d2 From b6c4f90b3819148b066154ac7ae5388232aa1773 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Mon, 18 Mar 2024 11:54:13 +0100 Subject: drm/amdgpu: sync page table freeing with tlb flush MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The idea behind this patch is to delay the freeing of PT entry objects until the TLB flush is done. This patch: - Adds a tlb_flush_waitlist in amdgpu_vm_update_params which will keep the objects that need to be freed after tlb_flush. - Adds PT entries in this list in amdgpu_vm_ptes_update after finding the PT entry. - Changes functionality of amdgpu_vm_pt_free_dfs from (df_search + free) to simply freeing of the BOs, also renames it to amdgpu_vm_pt_free_list to reflect this same. - Exports function amdgpu_vm_pt_free_list to be called directly. - Calls amdgpu_vm_pt_free_list directly from amdgpu_vm_update_range. V2: rebase V4: Addressed review comments from Christian - add only locked PTEs entries in TLB flush waitlist. - do not create a separate function for list flush. - do not create a new lock for TLB flush. - there is no need to wait on tlb_flush_fence exclusively. V5: Addressed review comments from Christian - change the amdgpu_vm_pt_free_dfs's functionality to simple freeing of the objects and rename it. 
- add all the PTE objects in params->tlb_flush_waitlist - let amdgpu_vm_pt_free_root handle the freeing of BOs independently - call amdgpu_vm_pt_free_list directly V6: Rebase V7: Rebase V8: Added a NULL check to fix this backtrace issue: [ 415.351447] BUG: kernel NULL pointer dereference, address: 0000000000000008 [ 415.359245] #PF: supervisor write access in kernel mode [ 415.365081] #PF: error_code(0x0002) - not-present page [ 415.370817] PGD 101259067 P4D 101259067 PUD 10125a067 PMD 0 [ 415.377140] Oops: 0002 [#1] PREEMPT SMP NOPTI [ 415.382004] CPU: 0 PID: 25481 Comm: test_with_MPI.e Tainted: G OE 5.18.2-mi300-build-140423-ubuntu-22.04+ #24 [ 415.394437] Hardware name: AMD Corporation Sh51p/Sh51p, BIOS RMO1001AS 02/21/2024 [ 415.402797] RIP: 0010:amdgpu_vm_ptes_update+0x6fd/0xa10 [amdgpu] [ 415.409648] Code: 4c 89 ff 4d 8d 66 30 e8 f1 ed ff ff 48 85 db 74 42 48 39 5d a0 74 40 48 8b 53 20 48 8b 4b 18 48 8d 43 18 48 8d 75 b0 4c 89 ff <48 > 89 51 08 48 89 0a 49 8b 56 30 48 89 42 08 48 89 53 18 4c 89 63 [ 415.430621] RSP: 0018:ffffc9000401f990 EFLAGS: 00010287 [ 415.436456] RAX: ffff888147bb82f0 RBX: ffff888147bb82d8 RCX: 0000000000000000 [ 415.444426] RDX: 0000000000000000 RSI: ffffc9000401fa30 RDI: ffff888161f80000 [ 415.452397] RBP: ffffc9000401fa80 R08: 0000000000000000 R09: ffffc9000401fa00 [ 415.460368] R10: 00000007f0cc0000 R11: 00000007f0c85000 R12: ffffc9000401fb20 [ 415.468340] R13: 00000007f0d00000 R14: ffffc9000401faf0 R15: ffff888161f80000 [ 415.476312] FS: 00007f132ff89840(0000) GS:ffff889f87c00000(0000) knlGS:0000000000000000 [ 415.485350] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 415.491767] CR2: 0000000000000008 CR3: 0000000161d46003 CR4: 0000000000770ef0 [ 415.499738] PKRU: 55555554 [ 415.502750] Call Trace: [ 415.505482] [ 415.507825] amdgpu_vm_update_range+0x32a/0x880 [amdgpu] [ 415.513869] amdgpu_vm_clear_freed+0x117/0x250 [amdgpu] [ 415.519814] amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu+0x18c/0x250 [amdgpu] [ 415.527729] kfd_ioctl_unmap_memory_from_gpu+0xed/0x340 [amdgpu] [ 415.534551] kfd_ioctl+0x3b6/0x510 [amdgpu] V9: Addressed review comments from Christian - No NULL check reqd for root PT freeing - Free PT list regardless of needs_flush - Move adding BOs in list in a separate function V10: Added Christian's RB V11: squash in list fix Cc: Christian König Cc: Alex Deucher Cc: Felix Kuehling Cc: Rajneesh Bhardwaj Acked-by: Felix Kuehling Acked-by: Rajneesh Bhardwaj Reviewed-by: Christian König Tested-by: Rajneesh Bhardwaj Signed-off-by: Shashank Sharma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 7 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 66 ++++++++++++++++++++----------- 3 files changed, 53 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 317d2d468a6b..8af3f0fd3073 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -988,6 +988,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.unlocked = unlocked; params.needs_flush = flush_tlb; params.allow_override = allow_override; + INIT_LIST_HEAD(¶ms.tlb_flush_waitlist); /* Implicitly sync to command submissions in the same VM before * unmapping. Sync to moving fences before mapping. 
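The deferred-free scheme described in the commit message above boils down to a simple ordering pattern: rather than releasing page-table objects as soon as they are unlinked, queue them on a per-update list and only free them once the TLB flush has completed. The following is a minimal, self-contained C sketch of that pattern only; it uses a hand-rolled singly linked list plus hypothetical pt_defer_free(), flush_tlb() and pt_free_list() stand-ins, not the real amdgpu_vm structures or the driver's fence handling, so treat it as an illustration of the ordering rather than of the implementation.

#include <stdio.h>
#include <stdlib.h>

struct pt_entry {
	int id;
	struct pt_entry *next;
};

struct update_params {
	struct pt_entry *tlb_flush_waitlist; /* entries waiting for the flush */
};

/* Queue an entry instead of freeing it immediately. */
static void pt_defer_free(struct update_params *p, struct pt_entry *e)
{
	e->next = p->tlb_flush_waitlist;
	p->tlb_flush_waitlist = e;
}

/* Stand-in for the real TLB flush; assumed to return only once done. */
static void flush_tlb(void)
{
	puts("TLB flush completed");
}

/* Safe only after flush_tlb(): release everything that was queued. */
static void pt_free_list(struct update_params *p)
{
	while (p->tlb_flush_waitlist) {
		struct pt_entry *e = p->tlb_flush_waitlist;

		p->tlb_flush_waitlist = e->next;
		printf("freeing page-table entry %d\n", e->id);
		free(e);
	}
}

int main(void)
{
	struct update_params params = { .tlb_flush_waitlist = NULL };
	int i;

	for (i = 0; i < 3; i++) {
		struct pt_entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->id = i;
		pt_defer_free(&params, e); /* unlinked, but not freed yet */
	}

	flush_tlb();           /* 1. make sure no stale translations remain */
	pt_free_list(&params); /* 2. only now give the memory back */
	return 0;
}

The point of the sketch is that free() is never reached before flush_tlb() returns, which is the guarantee the commit message is after when it moves the page-table BOs onto tlb_flush_waitlist and frees them only once the flush has been issued.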
@@ -1078,6 +1079,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, tlb_cb = NULL; } + amdgpu_vm_pt_free_list(adev, ¶ms); + error_free: kfree(tlb_cb); amdgpu_vm_eviction_unlock(vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index b0a4fe683352..54d7da396de0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -266,6 +266,11 @@ struct amdgpu_vm_update_params { * to be overridden for NUMA local memory. */ bool allow_override; + + /** + * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush + */ + struct list_head tlb_flush_waitlist; }; struct amdgpu_vm_update_funcs { @@ -547,6 +552,8 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, uint64_t start, uint64_t end, uint64_t dst, uint64_t flags); void amdgpu_vm_pt_free_work(struct work_struct *work); +void amdgpu_vm_pt_free_list(struct amdgpu_device *adev, + struct amdgpu_vm_update_params *params); #if defined(CONFIG_DEBUG_FS) void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 601df0ce8290..cbe0ae4c8738 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -622,40 +622,58 @@ void amdgpu_vm_pt_free_work(struct work_struct *work) } /** - * amdgpu_vm_pt_free_dfs - free PD/PT levels + * amdgpu_vm_pt_free_list - free PD/PT levels * * @adev: amdgpu device structure - * @vm: amdgpu vm structure - * @start: optional cursor where to start freeing PDs/PTs - * @unlocked: vm resv unlock status + * @params: see amdgpu_vm_update_params definition * - * Free the page directory or page table level and all sub levels. + * Free the page directory objects saved in the flush list */ -static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev, - struct amdgpu_vm *vm, - struct amdgpu_vm_pt_cursor *start, - bool unlocked) +void amdgpu_vm_pt_free_list(struct amdgpu_device *adev, + struct amdgpu_vm_update_params *params) { - struct amdgpu_vm_pt_cursor cursor; - struct amdgpu_vm_bo_base *entry; + struct amdgpu_vm_bo_base *entry, *next; + struct amdgpu_vm *vm = params->vm; + bool unlocked = params->unlocked; + + if (list_empty(¶ms->tlb_flush_waitlist)) + return; if (unlocked) { spin_lock(&vm->status_lock); - for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) - list_move(&entry->vm_status, &vm->pt_freed); - - if (start) - list_move(&start->entry->vm_status, &vm->pt_freed); + list_splice_init(¶ms->tlb_flush_waitlist, &vm->pt_freed); spin_unlock(&vm->status_lock); schedule_work(&vm->pt_free_work); return; } - for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) + list_for_each_entry_safe(entry, next, ¶ms->tlb_flush_waitlist, vm_status) amdgpu_vm_pt_free(entry); +} - if (start) - amdgpu_vm_pt_free(start->entry); +/** + * amdgpu_vm_pt_add_list - add PD/PT level to the flush list + * + * @params: parameters for the update + * @cursor: first PT entry to start DF search from, non NULL + * + * This list will be freed after TLB flush. 
+ */ +static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params, + struct amdgpu_vm_pt_cursor *cursor) +{ + struct amdgpu_vm_pt_cursor seek; + struct amdgpu_vm_bo_base *entry; + + spin_lock(¶ms->vm->status_lock); + for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) { + if (entry && entry->bo) + list_move(&entry->vm_status, ¶ms->tlb_flush_waitlist); + } + + /* enter start node now */ + list_move(&cursor->entry->vm_status, ¶ms->tlb_flush_waitlist); + spin_unlock(¶ms->vm->status_lock); } /** @@ -667,7 +685,11 @@ static void amdgpu_vm_pt_free_dfs(struct amdgpu_device *adev, */ void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - amdgpu_vm_pt_free_dfs(adev, vm, NULL, false); + struct amdgpu_vm_pt_cursor cursor; + struct amdgpu_vm_bo_base *entry; + + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) + amdgpu_vm_pt_free(entry); } /** @@ -973,9 +995,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, /* Make sure previous mapping is freed */ if (cursor.entry->bo) { params->needs_flush = true; - amdgpu_vm_pt_free_dfs(adev, params->vm, - &cursor, - params->unlocked); + amdgpu_vm_pt_add_list(params, &cursor); } amdgpu_vm_pt_next(adev, &cursor); } -- cgit v1.2.3-70-g09d2 From 02d377318291f4e88bfb91d482462f7e0928f255 Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Fri, 8 Mar 2024 16:41:00 +0800 Subject: drm/amdgpu: add VCN sensor value for SMU 14.0.0 This will add VCN sensor value for SMU 14.0.0. Signed-off-by: Xiaojian Du Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index d6de6d97286c..ff6e464e40a6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -362,6 +362,12 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu, (uint32_t *)data); *size = 4; break; + case AMDGPU_PP_SENSOR_VCN_LOAD: + ret = smu_v14_0_0_get_smu_metrics_data(smu, + METRICS_AVERAGE_VCNACTIVITY, + (uint32_t *)data); + *size = 4; + break; case AMDGPU_PP_SENSOR_GPU_AVG_POWER: ret = smu_v14_0_0_get_smu_metrics_data(smu, METRICS_AVERAGE_SOCKETPOWER, -- cgit v1.2.3-70-g09d2 From 052af44ffae7728a7ac3965cde0e60331eac2e42 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Fri, 1 Mar 2024 19:52:53 -0500 Subject: drm/amd/display: fix debug key not working on dml2 [why] need to apply the debug key check for max displayclk. 
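Concretely, the debug key check amounts to clamping the reported bandwidth clocks to the maximum supported values whenever forced clocks are requested. A condensed sketch of the logic in the hunk below; the helper and its struct dc_clocks parameter are illustrative, only the field and debug-key names come from the patch:

	/* When either debug key forces max clocks, report the max supported
	 * values as the bandwidth dispclk/dppclk instead of the computed ones. */
	static void clamp_bw_clocks_to_max(struct dc_clocks *clk,
					   bool forced_clocks, bool max_disp_clk)
	{
		if (forced_clocks || max_disp_clk) {
			clk->bw_dispclk_khz = clk->max_supported_dispclk_khz;
			clk->bw_dppclk_khz = clk->max_supported_dppclk_khz;
		}
	}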
Reviewed-by: Chris Park Acked-by: Wayne Lin Signed-off-by: Charlene Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index f15d1dbad6a9..ac2676bb9fcb 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -374,10 +374,16 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dppclk_mhz * 1000; context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = in_ctx->v20.dml_core_ctx.states.state_array[in_ctx->v20.scratch.mode_support_params.out_lowest_state_idx].dispclk_mhz * 1000; + + if (dc->config.forced_clocks || dc->debug.max_disp_clk) { + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz ; + } } void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx) -- cgit v1.2.3-70-g09d2 From 7cc9196675234d4de0e1e19b9da1a8b86ecfeedd Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Mon, 4 Mar 2024 14:38:04 -0500 Subject: drm/amd/display: Power on VPG memory unconditionally if off [WHY&HOW] Even if the memory low power feature policy states that it is disabled, VPG memory should still be powered on if it is currently disabled when requested. Reviewed-by: Chris Park Acked-by: Wayne Lin Signed-off-by: Dillon Varone Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c index f1deb1c3c363..cfb923d85630 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c @@ -63,7 +63,12 @@ void vpg31_poweron(struct vpg *vpg) { struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg); - if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false) + uint32_t vpg_gsp_mem_pwr_state; + + REG_GET(VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, &vpg_gsp_mem_pwr_state); + + if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false && + vpg_gsp_mem_pwr_state == 0) return; REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0); -- cgit v1.2.3-70-g09d2 From 6619168fe1828e55d5de2a26262101ee2a508d73 Mon Sep 17 00:00:00 2001 From: Natanel Roizenman Date: Tue, 5 Mar 2024 20:26:54 -0500 Subject: drm/amd/display: Added debug prints for zstate_support and StutterPeriod Added debug prints for zstate_support and StutterPeriod in dcn35_decide_zstate_support for testing.
Reviewed-by: Nicholas Kazlauskas Acked-by: Wayne Lin Signed-off-by: Natanel Roizenman Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 4d7bcda8ef72..9ac3bc6643c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -577,6 +577,7 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) { enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW; unsigned int i, plane_count = 0; + DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < dc->res_pool->pipe_count; i++) { if (context->res_ctx.pipe_ctx[i].plane_state) @@ -608,5 +609,8 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) } + DC_LOG_SMU("zstate_support: %d, StutterPeriod: %d\n", support, + (int)context->bw_ctx.dml.vba.StutterPeriod); + context->bw_ctx.bw.dcn.clk.zstate_support = support; } -- cgit v1.2.3-70-g09d2 From e2680ee222d705597c4ff42c46e86b4bd3860d8e Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 12 Feb 2024 10:40:55 -0500 Subject: drm/amdkfd: Check cgroup when returning DMABuf info Check cgroup permissions when returning DMA-buf info and based on cgroup info return the GPU id of the GPU that have access to the BO. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index dfa8c69532d4..f9631f4b1a02 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1523,7 +1523,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, /* Find a KFD GPU device that supports the get_dmabuf_info query */ for (i = 0; kfd_topology_enum_kfd_devices(i, &dev) == 0; i++) - if (dev) + if (dev && !kfd_devcgroup_check_permission(dev)) break; if (!dev) return -EINVAL; @@ -1545,7 +1545,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, if (xcp_id >= 0) args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id; else - args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id; + args->gpu_id = dev->id; args->flags = flags; /* Copy metadata buffer to user mode */ -- cgit v1.2.3-70-g09d2 From f7bcfb7a56b2b6165daa9930429ec5bcd7e00e62 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 26 Feb 2024 16:30:46 +0800 Subject: drm/amdgpu: retrieve umc odecc error count for aca umc v12.0 retrieve umc odecc error count for aca umc v12.0 Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index d29ea2fde025..5f08d3dcf174 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -510,7 +510,8 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank struct amdgpu_device *adev = handle->adev; struct aca_bank_info info; enum aca_error_type err_type; - u64 status; + u64 status, count; + u32 ext_error_code; int ret; status = 
bank->regs[ACA_REG_IDX_STATUS]; @@ -527,7 +528,11 @@ static int umc_v12_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank if (ret) return ret; - return aca_error_cache_log_bank_error(handle, &info, err_type, 1ULL); + ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status); + count = ext_error_code == 0 ? + ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]) : 1ULL; + + return aca_error_cache_log_bank_error(handle, &info, err_type, count); } static const struct aca_bank_ops umc_v12_0_aca_bank_ops = { -- cgit v1.2.3-70-g09d2 From bd15bf742f6d869998d331f01496e8ed54bcf237 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Sun, 3 Mar 2024 19:01:23 +0800 Subject: drm/amdgpu: avoid update aca bank multi times during ras isr Because the UE Valid MCA count will only be cleared after reset, in order to avoid repeated counting of the error count, the aca bank is only updated once during ras isr. Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 27 +++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 1 + 2 files changed, 28 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 611c1751577a..53ad76f590a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -369,6 +369,26 @@ static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks * return 0; } +static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type type) +{ + struct amdgpu_aca *aca = &adev->aca; + bool ret = true; + + /* + * Because the UE Valid MCA count will only be cleared after reset, + * in order to avoid repeated counting of the error count, + * the aca bank is only updated once during the gpu recovery stage. + */ + if (type == ACA_SMU_TYPE_UE) { + if (amdgpu_ras_intr_triggered()) + ret = atomic_cmpxchg(&aca->ue_update_flag, 0, 1) == 0; + else + atomic_set(&aca->ue_update_flag, 0); + } + + return ret; +} + static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type, bank_handler_t handler, void *data) { @@ -380,6 +400,9 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type, if (list_empty(&aca->mgr.list)) return 0; + if (!aca_bank_should_update(adev, type)) + return 0; + ret = aca_smu_get_valid_aca_count(adev, type, &count); if (ret) return ret; @@ -670,6 +693,8 @@ int amdgpu_aca_init(struct amdgpu_device *adev) struct amdgpu_aca *aca = &adev->aca; int ret; + atomic_set(&aca->ue_update_flag, 0); + ret = aca_manager_init(&aca->mgr); if (ret) return ret; @@ -682,6 +707,8 @@ void amdgpu_aca_fini(struct amdgpu_device *adev) struct amdgpu_aca *aca = &adev->aca; aca_manager_fini(&aca->mgr); + + atomic_set(&aca->ue_update_flag, 0); } int amdgpu_aca_reset(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 6eb4c0341278..674a5a9da862 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -175,6 +175,7 @@ struct aca_smu_funcs { struct amdgpu_aca { struct aca_handle_manager mgr; const struct aca_smu_funcs *smu_funcs; + atomic_t ue_update_flag; bool is_enabled; }; -- cgit v1.2.3-70-g09d2 From 31fd330b97ba334e00ea6102dfb68565eeb7a23f Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 18 Mar 2024 09:32:36 +0800 Subject: drm/amdgpu: add ras event id support for ACA add ras event id support for ACA. 
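In short, each ACA log site now takes an event id from an optional query context instead of logging anonymously. A condensed sketch of the flow implemented by the aca_smu_bank_dump() change in the hunks below; the helper name here is illustrative, the macro and field usage follow the patch:

	static void aca_bank_log_one(struct amdgpu_device *adev,
				     struct ras_query_context *qctx,
				     int idx, int total, u64 status)
	{
		/* fall back to event id 0 when no query context is provided */
		u64 event_id = qctx ? qctx->event_id : 0ULL;

		RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].STATUS=0x%016llx\n",
			      idx + 1, total, status);
	}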
Signed-off-by: Yang Wang Reviewed-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 29 ++++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 11 ++++++----- 3 files changed, 23 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index 53ad76f590a1..c3242dbf8355 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -116,20 +116,22 @@ static struct aca_regs_dump { {"CONTROL_MASK", ACA_REG_IDX_CTL_MASK}, }; -static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank) +static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank, + struct ras_query_context *qctx) { + u64 event_id = qctx ? qctx->event_id : 0ULL; int i; - dev_info(adev->dev, HW_ERR "Accelerator Check Architecture events logged\n"); + RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n"); /* plus 1 for output format, e.g: ACA[08/08]: xxxx */ for (i = 0; i < ARRAY_SIZE(aca_regs); i++) - dev_info(adev->dev, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", - idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); + RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n", + idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]); } static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type, int start, int count, - struct aca_banks *banks) + struct aca_banks *banks, struct ras_query_context *qctx) { struct amdgpu_aca *aca = &adev->aca; const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; @@ -165,7 +167,7 @@ static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_ bank.type = type; - aca_smu_bank_dump(adev, i, count, &bank); + aca_smu_bank_dump(adev, i, count, &bank, qctx); ret = aca_banks_add_bank(banks, &bank); if (ret) @@ -390,7 +392,7 @@ static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type } static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type, - bank_handler_t handler, void *data) + bank_handler_t handler, struct ras_query_context *qctx, void *data) { struct amdgpu_aca *aca = &adev->aca; struct aca_banks banks; @@ -412,7 +414,7 @@ static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type, aca_banks_init(&banks); - ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks); + ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks, qctx); if (ret) goto err_release_banks; @@ -489,7 +491,7 @@ out_unlock: } static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type, - struct ras_err_data *err_data) + struct ras_err_data *err_data, struct ras_query_context *qctx) { enum aca_smu_type smu_type; int ret; @@ -507,7 +509,7 @@ static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *h } /* udpate aca bank to aca source error_cache first */ - ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, NULL); + ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL); if (ret) return ret; @@ -523,7 +525,7 @@ static bool aca_handle_is_valid(struct aca_handle *handle) } int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, - enum aca_error_type type, 
void *data) + enum aca_error_type type, void *data, void *qctx) { struct ras_err_data *err_data = (struct ras_err_data *)data; @@ -536,7 +538,8 @@ int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *han if (!(BIT(type) & handle->mask)) return 0; - return __aca_get_error_data(adev, handle, type, err_data); + return __aca_get_error_data(adev, handle, type, err_data, + (struct ras_query_context *)qctx); } static void aca_error_init(struct aca_error *aerr, enum aca_error_type type) @@ -853,7 +856,7 @@ static int aca_dump_show(struct seq_file *m, enum aca_smu_type type) .idx = 0, }; - return aca_banks_update(adev, type, handler_aca_bank_dump, (void *)&context); + return aca_banks_update(adev, type, handler_aca_bank_dump, NULL, (void *)&context); } static int aca_dump_ce_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 674a5a9da862..247968d6a925 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -198,7 +198,7 @@ int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle, const char *name, const struct aca_info *aca_info, void *data); void amdgpu_aca_remove_handle(struct aca_handle *handle); int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, - enum aca_error_type type, void *data); + enum aca_error_type type, void *data, void *qctx); int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en); void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root); int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2bc02b809246..b8c7d0bf8fb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1269,7 +1269,8 @@ int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk) } static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk, - enum aca_error_type type, struct ras_err_data *err_data) + enum aca_error_type type, struct ras_err_data *err_data, + struct ras_query_context *qctx) { struct ras_manager *obj; @@ -1277,7 +1278,7 @@ static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu if (!obj) return -EINVAL; - return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data); + return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx); } ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr, @@ -1334,15 +1335,15 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, } } else { if (amdgpu_aca_is_enabled(adev)) { - ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data); + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx); if (ret) return ret; - ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data); + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx); if (ret) return ret; - ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data); + ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx); if (ret) return ret; } else { -- cgit v1.2.3-70-g09d2 From 6a3c243fdcf13adfaeb60a6d524104c937e3fdac Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Tue, 5 Mar 2024 
16:42:28 -0500 Subject: drm/amd/display: change aux_init to apu version [why] APU has different refclk as dGPU which is used for AUX_DPHY setup Reviewed-by: Chris Park Acked-by: Wayne Lin Signed-off-by: Charlene Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h | 2 ++ drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 2 +- 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index 26be5fee7411..b2cea59ba5d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -205,7 +205,7 @@ void dcn31_link_encoder_set_dio_phy_mux( } } -static void enc31_hw_init(struct link_encoder *enc) +void enc31_hw_init(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h index 221671563a0b..b5dfd404676a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.h @@ -283,4 +283,6 @@ bool dcn31_link_encoder_is_in_alt_mode( void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings); +void enc31_hw_init(struct link_encoder *enc); + #endif /* __DC_LINK_ENCODER__DCN31_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index 81e349d5835b..6e6ae3de08e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -119,7 +119,7 @@ void dcn35_link_encoder_setup( void dcn35_link_encoder_init(struct link_encoder *enc) { - enc32_hw_init(enc); + enc31_hw_init(enc); dcn35_link_encoder_set_fgcg(enc, enc->ctx->dc->debug.enable_fine_grain_clock_gating.bits.dio); } -- cgit v1.2.3-70-g09d2 From 414998f2a017b2463e84cfccb32a42af191f3780 Mon Sep 17 00:00:00 2001 From: Natanel Roizenman Date: Wed, 6 Mar 2024 12:38:54 -0500 Subject: drm/amd/display: Increase Z8 watermark times. Increase Z8 watermark times from 210->250us and 320->350us. 
Reviewed-by: Nicholas Kazlauskas Acked-by: Wayne Lin Signed-off-by: Natanel Roizenman Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4 ++-- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 9ac3bc6643c3..33ea89f20449 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .num_states = 5, .sr_exit_time_us = 28.0, .sr_enter_plus_exit_time_us = 30.0, - .sr_exit_z8_time_us = 210.0, - .sr_enter_plus_exit_z8_time_us = 320.0, + .sr_exit_z8_time_us = 250.0, + .sr_enter_plus_exit_z8_time_us = 350.0, .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index dc9e1b758ed6..7bd67f6b1595 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -145,8 +145,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .num_states = 5, .sr_exit_time_us = 28.0, .sr_enter_plus_exit_time_us = 30.0, - .sr_exit_z8_time_us = 210.0, - .sr_enter_plus_exit_z8_time_us = 320.0, + .sr_exit_z8_time_us = 250.0, + .sr_enter_plus_exit_z8_time_us = 350.0, .fclk_change_latency_us = 24.0, .usr_retraining_latency_us = 2, .writeback_latency_us = 12.0, -- cgit v1.2.3-70-g09d2 From f2afc8d4c3d08b450dcf957a253333a4e3ed65f9 Mon Sep 17 00:00:00 2001 From: Chris Park Date: Tue, 5 Mar 2024 17:41:15 -0500 Subject: drm/amd/display: Prevent crash when disable stream [Why] Disabling stream encoder invokes a function that no longer exists. [How] Check if the function declaration is NULL in disable stream encoder. 
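The fix follows the usual guard pattern for optional hooks in a funcs table; a generic, self-contained sketch of that pattern (the struct and names here are hypothetical, not the DC types used in the hunk below):

	struct example_funcs {
		/* optional: may be left NULL by ASICs that removed the hook */
		void (*set_dtbclk_dto)(void *dccg, const void *params);
	};

	static void call_set_dtbclk_dto(const struct example_funcs *funcs,
					void *dccg, const void *params)
	{
		if (funcs && funcs->set_dtbclk_dto)
			funcs->set_dtbclk_dto(dccg, params);
	}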
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Charlene Liu Acked-by: Wayne Lin Signed-off-by: Chris Park Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 9d5df4c0da59..0ba1feaf96c0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1185,7 +1185,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) if (dccg) { dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); } } else if (dccg && dccg->funcs->disable_symclk_se) { dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, -- cgit v1.2.3-70-g09d2 From 398a16e1f03b6b583b94c7ab080aa38432fb0502 Mon Sep 17 00:00:00 2001 From: Xi Liu Date: Thu, 7 Mar 2024 11:51:56 -0500 Subject: drm/amd/display: increase bb clock for DCN351 [Why and how] Bounding box clocks for DCN351 should be increased as per request Reviewed-by: Swapnil Patel Acked-by: Wayne Lin Signed-off-by: Xi Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 90 ++++++++++++++++++---- 1 file changed, 76 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 7bd67f6b1595..b6246406a042 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -98,51 +98,110 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .clock_limits = { { .state = 0, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, + .dcfclk_mhz = 400.0, + .fabricclk_mhz = 400.0, + .socclk_mhz = 600.0, + .dram_speed_mts = 3200.0, + .dispclk_mhz = 600.0, + .dppclk_mhz = 600.0, .phyclk_mhz = 600.0, .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 186.0, + .dscclk_mhz = 200.0, .dtbclk_mhz = 600.0, }, { .state = 1, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, + .dcfclk_mhz = 600.0, + .fabricclk_mhz = 1000.0, + .socclk_mhz = 733.0, + .dram_speed_mts = 6400.0, + .dispclk_mhz = 800.0, + .dppclk_mhz = 800.0, .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 209.0, + .dscclk_mhz = 266.7, .dtbclk_mhz = 600.0, }, { .state = 2, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, + .dcfclk_mhz = 738.0, + .fabricclk_mhz = 1200.0, + .socclk_mhz = 880.0, + .dram_speed_mts = 7500.0, + .dispclk_mhz = 800.0, + .dppclk_mhz = 800.0, .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 209.0, + .dscclk_mhz = 266.7, .dtbclk_mhz = 600.0, }, { .state = 3, - .dispclk_mhz = 1200.0, - .dppclk_mhz = 1200.0, + .dcfclk_mhz = 800.0, + .fabricclk_mhz = 1400.0, + .socclk_mhz = 978.0, + .dram_speed_mts = 7500.0, + .dispclk_mhz = 960.0, + .dppclk_mhz = 960.0, .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 371.0, + .dscclk_mhz = 320.0, .dtbclk_mhz = 600.0, }, { .state = 4, + .dcfclk_mhz = 873.0, + .fabricclk_mhz = 1600.0, + .socclk_mhz = 1100.0, + .dram_speed_mts = 8533.0, + .dispclk_mhz = 1066.7, + .dppclk_mhz = 1066.7, + .phyclk_mhz = 
810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 355.6, + .dtbclk_mhz = 600.0, + }, + { + .state = 5, + .dcfclk_mhz = 960.0, + .fabricclk_mhz = 1700.0, + .socclk_mhz = 1257.0, + .dram_speed_mts = 8533.0, .dispclk_mhz = 1200.0, .dppclk_mhz = 1200.0, .phyclk_mhz = 810.0, .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 417.0, + .dscclk_mhz = 400.0, + .dtbclk_mhz = 600.0, + }, + { + .state = 6, + .dcfclk_mhz = 1067.0, + .fabricclk_mhz = 1850.0, + .socclk_mhz = 1257.0, + .dram_speed_mts = 8533.0, + .dispclk_mhz = 1371.4, + .dppclk_mhz = 1371.4, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 457.1, + .dtbclk_mhz = 600.0, + }, + { + .state = 7, + .dcfclk_mhz = 1200.0, + .fabricclk_mhz = 2000.0, + .socclk_mhz = 1467.0, + .dram_speed_mts = 8533.0, + .dispclk_mhz = 1600.0, + .dppclk_mhz = 1600.0, + .phyclk_mhz = 810.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 533.3, .dtbclk_mhz = 600.0, }, }, - .num_states = 5, + .num_states = 8, .sr_exit_time_us = 28.0, .sr_enter_plus_exit_time_us = 30.0, .sr_exit_z8_time_us = 250.0, @@ -177,6 +236,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .do_urgent_latency_adjustment = 0, .urgent_latency_adjustment_fabric_clock_component_us = 0, .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, + .num_chans = 4, + .dram_clock_change_latency_us = 11.72, + .dispclk_dppclk_vco_speed_mhz = 2400.0, }; /* -- cgit v1.2.3-70-g09d2 From 93ddf00f874c26e078ba01010bc126f521d49e7f Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 26 Feb 2024 14:29:05 -0500 Subject: drm/amd/display: Detect and disallow idle reallow during reentrancy [Why] Cursor updates can be preempted by queued flips in some DMs. The synchronization model causes this to occur within the same thread at an intermediate level when we insert logs into the OS queue. Since this occurs on the same thread and we're still holding the lock (recursively) the cache is coherent. The exit sequence will run twice since we technically haven't finished the exit the first time, so we need a way to detect and avoid the reallow in the middle of this call to prevent the hang on the cursor update that was preempted. [How] Keep a counter that tracks the depth of the exit calls. Do not reallow until the counter is zero. Reviewed-by: Duncan Ma Acked-by: Wayne Lin Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 14 ++++++++++++-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 1 + 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index f796ed061879..4878e9e50440 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1437,6 +1437,8 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ */ if (!allow_idle) { + dc_dmub_srv->idle_exit_counter += 1; + dc_dmub_srv_exit_low_power_state(dc); /* * Idle is considered fully exited only after the sequence above @@ -1448,6 +1450,12 @@ void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_ * dm_execute_dmub_cmd submission instead of the "wake" helpers. */ dc_dmub_srv->idle_allowed = false; + + dc_dmub_srv->idle_exit_counter -= 1; + if (dc_dmub_srv->idle_exit_counter < 0) { + ASSERT(0); + dc_dmub_srv->idle_exit_counter = 0; + } } else { /* Consider idle as notified prior to the actual submission to * prevent multiple entries. 
*/ @@ -1489,7 +1497,8 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in else result = dm_execute_dmub_cmd(ctx, cmd, wait_type); - if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) + if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && + !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; @@ -1538,7 +1547,8 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type); - if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle) + if (result && reallow_idle && dc_dmub_srv->idle_exit_counter == 0 && + !ctx->dc->debug.disable_dmub_reallow_idle) dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true); return result; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 60c93e9e3533..c0a512a12531 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -51,6 +51,7 @@ struct dc_dmub_srv { struct dc_context *ctx; void *dm; + int32_t idle_exit_counter; bool idle_allowed; bool needs_idle_wake; }; -- cgit v1.2.3-70-g09d2 From 8803bfffb7f81a6e62950ce772bd4d02b02b1adf Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Thu, 7 Mar 2024 14:50:00 -0500 Subject: drm/amd/display: Add optional optimization for IPS handshake [Why] It's possible to skip parts of the eval and exit sequencing if we know whether DCN is in IPS2 already or if it's committed to going to idle and not in IPS2. [How] Skip IPS2 entry/exit if DMCUB is idle but the IPS2 commit is not set. Skip the eval delay if DMCUB is already in IPS2 since we know we need to exit. These are turned off by default. 
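The two skips can be read as simple predicates over the firmware signal bits introduced below; a minimal sketch assuming the in_idle and ips2_commit bits from the DMUB shared-state change, with illustrative function names (the real logic sits in dc_dmub_srv_exit_low_power_state()):

	/* Skip the whole IPS2 exit when DMCUB is idle but never committed to IPS2. */
	static bool ips2_exit_needed(bool optimize, bool in_idle, bool ips2_commit)
	{
		return !optimize || ips2_commit || !in_idle;
	}

	/* Skip the eval delay when DMCUB has already committed to IPS2. */
	static bool ips2_eval_delay_needed(bool optimize, bool ips2_commit)
	{
		return !optimize || !ips2_commit;
	}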
Reviewed-by: Duncan Ma Acked-by: Wayne Lin Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 7 +++++-- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 3 ++- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7b22c2efed77..d280f55ebe50 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -988,6 +988,7 @@ struct dc_debug_options { bool psp_disabled_wa; unsigned int ips2_eval_delay_us; unsigned int ips2_entry_delay_us; + bool optimize_ips_handshake; bool disable_dmub_reallow_idle; bool disable_timeout; bool disable_extblankadj; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 4878e9e50440..12c142cae78b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1318,13 +1318,16 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) */ dc_dmub_srv->needs_idle_wake = false; - if (prev_driver_signals.bits.allow_ips2) { + if (prev_driver_signals.bits.allow_ips2 && + (!dc->debug.optimize_ips_handshake || + ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { DC_LOG_IPS( "wait IPS2 eval (ips1_commit=%d ips2_commit=%d)", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); - udelay(dc->debug.ips2_eval_delay_us); + if (!dc->debug.optimize_ips_handshake || !ips_fw->signals.bits.ips2_commit) + udelay(dc->debug.ips2_eval_delay_us); if (ips_fw->signals.bits.ips2_commit) { DC_LOG_IPS( diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 7b807aea8aa7..818e5d87f0da 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -700,7 +700,8 @@ union dmub_shared_state_ips_fw_signals { struct { uint32_t ips1_commit : 1; /**< 1 if in IPS1 */ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */ - uint32_t reserved_bits : 30; /**< Reversed */ + uint32_t in_idle : 1; /**< 1 if DMCUB is in idle */ + uint32_t reserved_bits : 29; /**< Reversed */ } bits; uint32_t all; }; -- cgit v1.2.3-70-g09d2 From 4cad092b989e11c66e747f03cd609a9ef74444ac Mon Sep 17 00:00:00 2001 From: Xi Liu Date: Thu, 7 Mar 2024 13:16:42 -0500 Subject: drm/amd/display: Remove unnecessary hard coded DPM states [Why] The hard coded DPM states are only used to fix mismatch states numbers from FW. [How] Remove when not needed. 
Reviewed-by: Sung joon Kim Acked-by: Wayne Lin Signed-off-by: Xi Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dml2/dml2_translation_helper.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 17a58f41fc6a..0a4dff45731f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -30,6 +30,10 @@ #define NUM_DCFCLK_STAS 5 +#if defined(CONFIG_DRM_AMD_DC_DCN3_51) +#define NUM_DCFCLK_STAS_NEW 8 +#endif + void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) { switch (dml2->v20.dml_core_ctx.project) { @@ -254,11 +258,21 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch; struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params; unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS]; +#if defined(CONFIG_DRM_AMD_DC_DCN3_51) + unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW]; + unsigned int dml_project = dml2->v20.dml_core_ctx.project; +#endif unsigned int i = 0; unsigned int transactions_per_mem_clock = 16; // project specific, depends on used Memory type p->dcfclk_stas_mhz = dcfclk_stas_mhz; p->num_dcfclk_stas = NUM_DCFCLK_STAS; +#if defined(CONFIG_DRM_AMD_DC_DCN3_51) + if (dml_project == dml_project_dcn351) { + p->dcfclk_stas_mhz = dcfclk_stas_mhz_new; + p->num_dcfclk_stas = NUM_DCFCLK_STAS_NEW; + } +#endif p->in_bbox = in_bbox; p->out_states = out; p->in_states = &dml2->v20.scratch.create_scratch.in_states; @@ -436,8 +450,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, } dml2_policy_build_synthetic_soc_states(s, p); - if (dml2->v20.dml_core_ctx.project == dml_project_dcn35 || - dml2->v20.dml_core_ctx.project == dml_project_dcn351) { + if (dml2->v20.dml_core_ctx.project == dml_project_dcn35) { // Override last out_state with data from last in_state // This will ensure that out_state contains max fclk memcpy(&p->out_states->state_array[p->out_states->num_states - 1], -- cgit v1.2.3-70-g09d2 From e42e96360e2d96cb78b6a192899e536e2fefd0a7 Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Wed, 28 Feb 2024 16:55:32 -0500 Subject: drm/amd/display: Enable new interface design for alternate scrambling [why & how] To enable a new interface so alternate scrambling can be done via security module. 
Reviewed-by: Wenjing Liu Acked-by: Wayne Lin Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3 + .../display/dc/link/protocols/link_dp_training.c | 16 +---- .../dc/link/protocols/link_edp_panel_control.c | 64 ++++++++++++++++++++ .../dc/link/protocols/link_edp_panel_control.h | 2 + .../display/dc/resource/dcn351/dcn351_resource.c | 3 + drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 68 ++++++++++++++++++++++ 7 files changed, 142 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index d280f55ebe50..673f36543f6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -436,6 +436,7 @@ struct dc_config { unsigned int disable_ips_in_vpb; bool usb4_bw_alloc_support; bool allow_0_dtb_clk; + bool use_assr_psp_message; }; enum visual_confirm { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index a72de44a5747..9c0e2b9cffc9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -2285,6 +2285,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + enum dp_panel_mode panel_mode_dp = dp_get_panel_mode(link); DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); @@ -2311,6 +2312,8 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx) dc->hwss.disable_audio_stream(pipe_ctx); + edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false); + update_psp_stream_config(pipe_ctx, true); dc->hwss.blank_stream(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c index e538c67d3ed9..9de5380757e0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -1587,21 +1587,7 @@ bool perform_link_training_with_retries( msleep(delay_dp_power_up_in_ms); } - if (panel_mode == DP_PANEL_MODE_EDP) { - struct cp_psp *cp_psp = &stream->ctx->cp_psp; - - if (cp_psp && cp_psp->funcs.enable_assr) { - /* ASSR is bound to fail with unsigned PSP - * verstage used during devlopment phase. 
- * Report and continue with eDP panel mode to - * perform eDP link training with right settings - */ - bool result; - result = cp_psp->funcs.enable_assr(cp_psp->handle, link); - if (!result && link->panel_mode != DP_PANEL_MODE_EDP) - panel_mode = DP_PANEL_MODE_DEFAULT; - } - } + edp_set_panel_assr(link, pipe_ctx, &panel_mode, true); dp_set_panel_mode(link, panel_mode); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 3baa2bdd6dd6..0682dbbad448 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -38,6 +38,7 @@ #include "dc/dc_dmub_srv.h" #include "dce/dmub_replay.h" #include "abm.h" +#include "resource.h" #define DC_LOGGER \ link->ctx->logger #define DC_LOGGER_INIT(logger) @@ -1145,3 +1146,66 @@ int edp_get_target_backlight_pwm(const struct dc_link *link) return (int) abm->funcs->get_target_backlight(abm); } + +static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, + struct link_resource *link_res, bool enable) +{ + union dmub_rb_cmd cmd; + bool use_hpo_dp_link_enc = false; + uint8_t link_enc_index = 0; + uint8_t phy_type = 0; + uint8_t phy_id = 0; + + if (!pDC->config.use_assr_psp_message) + return; + + memset(&cmd, 0, sizeof(cmd)); + + link_enc_index = link->link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + if (link_res->hpo_dp_link_enc) { + link_enc_index = link_res->hpo_dp_link_enc->inst; + use_hpo_dp_link_enc = true; + } + + if (enable) + phy_type = ((dp_get_panel_mode(link) == DP_PANEL_MODE_EDP) ? 1 : 0); + + phy_id = resource_transmitter_to_phy_idx(pDC, link->link_enc->transmitter); + + cmd.assr_enable.header.type = DMUB_CMD__PSP; + cmd.assr_enable.header.sub_type = DMUB_CMD__PSP_ASSR_ENABLE; + cmd.assr_enable.assr_data.enable = enable; + cmd.assr_enable.assr_data.phy_port_type = phy_type; + cmd.assr_enable.assr_data.phy_port_id = phy_id; + cmd.assr_enable.assr_data.link_enc_index = link_enc_index; + cmd.assr_enable.assr_data.hpo_mode = use_hpo_dp_link_enc; + + dc_wake_and_execute_dmub_cmd(pDC->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx, + enum dp_panel_mode *panel_mode, bool enable) +{ + struct link_resource *link_res = &pipe_ctx->link_res; + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + + if (*panel_mode != DP_PANEL_MODE_EDP) + return; + + if (link->dc->config.use_assr_psp_message) { + edp_set_assr_enable(link->dc, link, link_res, enable); + } else if (cp_psp && cp_psp->funcs.enable_assr && enable) { + /* ASSR is bound to fail with unsigned PSP + * verstage used during devlopment phase. 
+ * Report and continue with eDP panel mode to + * perform eDP link training with right settings + */ + bool result; + + result = cp_psp->funcs.enable_assr(cp_psp->handle, link); + + if (!result && link->panel_mode != DP_PANEL_MODE_EDP) + *panel_mode = DP_PANEL_MODE_DEFAULT; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index a158c6234d42..eee8a4db6f85 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -76,4 +76,6 @@ bool edp_receiver_ready_T9(struct dc_link *link); bool edp_receiver_ready_T7(struct dc_link *link); bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable); void edp_set_panel_power(struct dc_link *link, bool powerOn); +void edp_set_panel_assr(struct dc_link *link, struct pipe_ctx *pipe_ctx, + enum dp_panel_mode *panel_mode, bool enable); #endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 5b486400dfdb..2dfd73d5f767 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1864,6 +1864,9 @@ static bool dcn351_resource_construct( /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; + /* Use psp mailbox to enable assr */ + dc->config.use_assr_psp_message = true; + /* read VBIOS LTTPR caps */ { if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 818e5d87f0da..34cb25c6166a 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -1198,6 +1198,11 @@ enum dmub_cmd_type { */ DMUB_CMD__DPIA_HPD_INT_ENABLE = 86, + /** + * Command type used for all PSP commands. + */ + DMUB_CMD__PSP = 88, + DMUB_CMD__VBIOS = 128, }; @@ -4303,6 +4308,65 @@ struct dmub_rb_cmd_secure_display { } roi_info; }; +/** + * Command type of a DMUB_CMD__PSP command + */ +enum dmub_cmd_psp_type { + DMUB_CMD__PSP_ASSR_ENABLE = 0 +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__PSP_ASSR_ENABLE command. + */ +struct dmub_cmd_assr_enable_data { + /** + * ASSR enable or disable. + */ + uint8_t enable; + /** + * PHY port type. + * Indicates eDP / non-eDP port type + */ + uint8_t phy_port_type; + /** + * PHY port ID. + */ + uint8_t phy_port_id; + /** + * Link encoder index. + */ + uint8_t link_enc_index; + /** + * HPO mode. + */ + uint8_t hpo_mode; + + /** + * Reserved field. + */ + uint8_t reserved[7]; +}; + +/** + * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. + */ +struct dmub_rb_cmd_assr_enable { + /** + * Command header. + */ + struct dmub_cmd_header header; + + /** + * Assr data. + */ + struct dmub_cmd_assr_enable_data assr_data; + + /** + * Reserved field. + */ + uint32_t reserved[3]; +}; + /** * union dmub_rb_cmd - DMUB inbox command. */ @@ -4561,6 +4625,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. */ struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; + /** + * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. 
+ */ + struct dmub_rb_cmd_assr_enable assr_enable; }; /** -- cgit v1.2.3-70-g09d2 From 3be6a63e06573d4e55fabda8873b73327cf73da8 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 23 Feb 2024 10:46:34 -0500 Subject: drm/amd/display: Enable reallow for idle on DCN35 [Why] It was previously disabled for stability purposes, but command submission causes residency issues in IPS video playback. [How] Enable the disallow/reallow pattern back. There's additional checks now in DMCUB that should make this safer stability wise. Reviewed-by: Duncan Ma Acked-by: Wayne Lin Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index a8f4023ff3b1..49cccf3c0f9c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -783,7 +783,7 @@ static const struct dc_debug_options debug_defaults_drv = { .psp_disabled_wa = true, .ips2_eval_delay_us = 2000, .ips2_entry_delay_us = 800, - .disable_dmub_reallow_idle = true, + .disable_dmub_reallow_idle = false, .static_screen_wait_frames = 2, }; -- cgit v1.2.3-70-g09d2 From 734ae8ef625e30ea9b3ea0e7c8110bdceb6ddf26 Mon Sep 17 00:00:00 2001 From: "Lin.Cao" Date: Fri, 15 Mar 2024 11:50:11 +0800 Subject: drm/amd/pm set pp_dpm_*clk as read only for SRIOV one VF mode pp_dpm_*clk should be set as read only for SRIOV one VF mode, remove S_IWUGO flag and _store function of these debugfs in one VF mode. Signed-off-by: Lin.Cao Acked-by: Jingwen Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index c90f5972e03e..85e935556d7d 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2379,7 +2379,15 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } /* setting should not be allowed from VF if not in one VF mode */ - if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { + if (amdgpu_sriov_vf(adev) && (!amdgpu_sriov_is_pp_one_vf(adev) || + DEVICE_ATTR_IS(pp_dpm_sclk) || + DEVICE_ATTR_IS(pp_dpm_mclk) || + DEVICE_ATTR_IS(pp_dpm_socclk) || + DEVICE_ATTR_IS(pp_dpm_fclk) || + DEVICE_ATTR_IS(pp_dpm_vclk) || + DEVICE_ATTR_IS(pp_dpm_vclk1) || + DEVICE_ATTR_IS(pp_dpm_dclk) || + DEVICE_ATTR_IS(pp_dpm_dclk1))) { dev_attr->attr.mode &= ~S_IWUGO; dev_attr->store = NULL; } -- cgit v1.2.3-70-g09d2 From 9ddafd1d140441d288d3fc376d2809995ce504b7 Mon Sep 17 00:00:00 2001 From: Peyton Lee Date: Wed, 13 Mar 2024 16:53:49 +0800 Subject: drm/amdgpu/vpe: power on vpe when hw_init To fix mode2 reset failure. Should power on VPE when hw_init. 
Signed-off-by: Peyton Lee Reviewed-by: Lang Yu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c index 7a65a2b128ec..6695481f870f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c @@ -396,6 +396,12 @@ static int vpe_hw_init(void *handle) struct amdgpu_vpe *vpe = &adev->vpe; int ret; + /* Power on VPE */ + ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, + AMD_PG_STATE_UNGATE); + if (ret) + return ret; + ret = vpe_load_microcode(vpe); if (ret) return ret; -- cgit v1.2.3-70-g09d2 From 621cf07a3f25337b17becd4c9486308c0988ea49 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Mon, 4 Mar 2024 18:16:43 -0500 Subject: drm/amd/display: fix a bug to dereference already freed old current state memory [why] During minimal transition commit, the base state could be freed if it is current state. This is because after committing minimal transition state, the current state will be swapped to the minimal transition state and the old current state will be released. the release could cause the old current state's memory to be freed. However dc will derefernce this memory when release minimal transition state. Therefore, we need to retain the old current state until we release minimal transition state. Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Josip Pavic Acked-by: Wayne Lin Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2c7c3a788ab3..7222917e48bb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4203,7 +4203,6 @@ static void release_minimal_transition_state(struct dc *dc, { restore_minimal_pipe_split_policy(dc, base_context, policy); dc_state_release(minimal_transition_context); - /* restore previous pipe split and odm policy */ } static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context) @@ -4258,7 +4257,7 @@ static bool is_pipe_topology_transition_seamless_with_intermediate_step( intermediate_state, final_state); } -static void swap_and_free_current_context(struct dc *dc, +static void swap_and_release_current_context(struct dc *dc, struct dc_state *new_context, struct dc_stream_state *stream) { @@ -4320,7 +4319,7 @@ static bool commit_minimal_transition_based_on_new_context(struct dc *dc, commit_planes_for_stream(dc, srf_updates, surface_count, stream, NULL, UPDATE_TYPE_FULL, intermediate_context); - swap_and_free_current_context( + swap_and_release_current_context( dc, intermediate_context, stream); dc_state_retain(dc->current_state); success = true; @@ -4337,6 +4336,7 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc, bool success = false; struct pipe_split_policy_backup policy; struct dc_state *intermediate_context; + struct dc_state *old_current_state = dc->current_state; struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0}; int surface_count; @@ -4352,8 +4352,10 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc, * with the current state. 
*/ restore_planes_and_stream_state(&dc->scratch.current_state, stream); + dc_state_retain(old_current_state); intermediate_context = create_minimal_transition_state(dc, - dc->current_state, &policy); + old_current_state, &policy); + if (intermediate_context) { if (is_pipe_topology_transition_seamless_with_intermediate_step( dc, @@ -4366,14 +4368,15 @@ static bool commit_minimal_transition_based_on_current_context(struct dc *dc, commit_planes_for_stream(dc, srf_updates, surface_count, stream, NULL, UPDATE_TYPE_FULL, intermediate_context); - swap_and_free_current_context( + swap_and_release_current_context( dc, intermediate_context, stream); dc_state_retain(dc->current_state); success = true; } release_minimal_transition_state(dc, intermediate_context, - dc->current_state, &policy); + old_current_state, &policy); } + dc_state_release(old_current_state); /* * Restore stream and plane states back to the values associated with * new context. @@ -4497,12 +4500,14 @@ static bool commit_minimal_transition_state(struct dc *dc, dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" : "Unknown"); + dc_state_retain(transition_base_context); transition_context = create_minimal_transition_state(dc, transition_base_context, &policy); if (transition_context) { ret = dc_commit_state_no_check(dc, transition_context); release_minimal_transition_state(dc, transition_context, transition_base_context, &policy); } + dc_state_release(transition_base_context); if (ret != DC_OK) { /* this should never happen */ @@ -4840,7 +4845,7 @@ static bool update_planes_and_stream_v2(struct dc *dc, context); } if (dc->current_state != context) - swap_and_free_current_context(dc, context, stream); + swap_and_release_current_context(dc, context, stream); return true; } @@ -4942,7 +4947,7 @@ static bool update_planes_and_stream_v3(struct dc *dc, commit_planes_and_stream_update_with_new_context(dc, srf_updates, surface_count, stream, stream_update, update_type, new_context); - swap_and_free_current_context(dc, new_context, stream); + swap_and_release_current_context(dc, new_context, stream); } return true; -- cgit v1.2.3-70-g09d2 From aae9734b5eb7c4cce9020ecb58bb24db63014a5a Mon Sep 17 00:00:00 2001 From: Chaitanya Dhere Date: Fri, 8 Mar 2024 10:12:30 -0500 Subject: drm/amd/display: Add TB_BORROWED_MAX definition [Why & How] For DML2 to decouple it from other DML versions. 
Reviewed-by: Dillon Varone Acked-by: Wayne Lin Signed-off-by: Chaitanya Dhere Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 9be5ebf3a8c0..9184772d2e38 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -31,6 +31,8 @@ #include "dml_assert.h" #define DML2_MAX_FMT_420_BUFFER_WIDTH 4096 +#define TB_BORROWED_MAX 400 + // --------------------------- // Declaration Begins // --------------------------- -- cgit v1.2.3-70-g09d2 From b1edfb91fc70f25aaed1d2ebb507c434220057a8 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 10 Mar 2024 16:44:21 -0400 Subject: drm/amd/display: 3.2.277 This version brings along the following fixes: - Fix a few problems for DCN35 - Fix a bug which dereferences freed memory - Enable new interface design for alternate scrambling - Enhance IPS handshake - Increase Z8 watermark times - Fix a DML2 problem - Revert a patch which caused a regression - Fix problems for dmub idle power optimization Acked-by: Wayne Lin Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 673f36543f6c..e0b44c43e959 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.276" +#define DC_VER "3.2.277" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From 9022f01b9709331c485703e49b3a8b2633513a92 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Thu, 21 Mar 2024 00:53:35 +0530 Subject: drm/amdgpu: refactor code to split devcoredump code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Refactor the devcoredump code into new files since its functionality is being expanded further and it is better to split it out so that devcoredump has its own file.
v2: Fix the build failure caught by arm compiler of implicit function declaration with #ifdef v3: squash in fix for implicit declaration error Cc: Ivan Lipski Acked-by: Christian König Signed-off-by: Sunil Khatri Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 216 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h | 47 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c | 191 -------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 16 -- 6 files changed, 265 insertions(+), 208 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 535e3936cfe0..1f6b56ec99f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -81,7 +81,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \ amdgpu_fw_attestation.o amdgpu_securedisplay.o \ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \ - amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o + amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c new file mode 100644 index 000000000000..44c5da8aa9ce --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include "amdgpu_dev_coredump.h" + +#ifndef CONFIG_DEV_COREDUMP +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context) +{ +} +#else + +const char *hw_ip_names[MAX_HWIP] = { + [GC_HWIP] = "GC", + [HDP_HWIP] = "HDP", + [SDMA0_HWIP] = "SDMA0", + [SDMA1_HWIP] = "SDMA1", + [SDMA2_HWIP] = "SDMA2", + [SDMA3_HWIP] = "SDMA3", + [SDMA4_HWIP] = "SDMA4", + [SDMA5_HWIP] = "SDMA5", + [SDMA6_HWIP] = "SDMA6", + [SDMA7_HWIP] = "SDMA7", + [LSDMA_HWIP] = "LSDMA", + [MMHUB_HWIP] = "MMHUB", + [ATHUB_HWIP] = "ATHUB", + [NBIO_HWIP] = "NBIO", + [MP0_HWIP] = "MP0", + [MP1_HWIP] = "MP1", + [UVD_HWIP] = "UVD/JPEG/VCN", + [VCN1_HWIP] = "VCN1", + [VCE_HWIP] = "VCE", + [VPE_HWIP] = "VPE", + [DF_HWIP] = "DF", + [DCE_HWIP] = "DCE", + [OSSSYS_HWIP] = "OSSSYS", + [SMUIO_HWIP] = "SMUIO", + [PWR_HWIP] = "PWR", + [NBIF_HWIP] = "NBIF", + [THM_HWIP] = "THM", + [CLK_HWIP] = "CLK", + [UMC_HWIP] = "UMC", + [RSMU_HWIP] = "RSMU", + [XGMI_HWIP] = "XGMI", + [DCI_HWIP] = "DCI", + [PCIE_HWIP] = "PCIE", +}; + +static ssize_t +amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, + void *data, size_t datalen) +{ + struct drm_printer p; + struct amdgpu_coredump_info *coredump = data; + struct drm_print_iterator iter; + struct amdgpu_vm_fault_info *fault_info; + int i, ver; + + iter.data = buffer; + iter.offset = 0; + iter.start = offset; + iter.remain = count; + + p = drm_coredump_printer(&iter); + + drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); + drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n"); + drm_printf(&p, "kernel: " UTS_RELEASE "\n"); + drm_printf(&p, "module: " KBUILD_MODNAME "\n"); + drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, + coredump->reset_time.tv_nsec); + + if (coredump->reset_task_info.pid) + drm_printf(&p, "process_name: %s PID: %d\n", + coredump->reset_task_info.process_name, + coredump->reset_task_info.pid); + + /* GPU IP's information of the SOC */ + drm_printf(&p, "\nIP Information\n"); + drm_printf(&p, "SOC Family: %d\n", coredump->adev->family); + drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id); + drm_printf(&p, "SOC External Revision id: %d\n", coredump->adev->external_rev_id); + + for (int i = 1; i < MAX_HWIP; i++) { + for (int j = 0; j < HWIP_MAX_INSTANCE; j++) { + ver = coredump->adev->ip_versions[i][j]; + if (ver) + drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n", + hw_ip_names[i], i, j, + IP_VERSION_MAJ(ver), + IP_VERSION_MIN(ver), + IP_VERSION_REV(ver), + IP_VERSION_VARIANT(ver), + IP_VERSION_SUBREV(ver)); + } + } + + if (coredump->ring) { + drm_printf(&p, "\nRing timed out details\n"); + drm_printf(&p, "IP Type: %d Ring Name: %s\n", + coredump->ring->funcs->type, + coredump->ring->name); + } + + /* Add page fault information */ + fault_info = &coredump->adev->vm_manager.fault_info; + drm_printf(&p, "\n[%s] Page fault observed\n", + fault_info->vmhub ? 
"mmhub" : "gfxhub"); + drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", fault_info->addr); + drm_printf(&p, "Protection fault status register: 0x%x\n\n", fault_info->status); + + /* Add ring buffer information */ + drm_printf(&p, "Ring buffer information\n"); + for (int i = 0; i < coredump->adev->num_rings; i++) { + int j = 0; + struct amdgpu_ring *ring = coredump->adev->rings[i]; + + drm_printf(&p, "ring name: %s\n", ring->name); + drm_printf(&p, "Rptr: 0x%llx Wptr: 0x%llx RB mask: %x\n", + amdgpu_ring_get_rptr(ring), + amdgpu_ring_get_wptr(ring), + ring->buf_mask); + drm_printf(&p, "Ring size in dwords: %d\n", + ring->ring_size / 4); + drm_printf(&p, "Ring contents\n"); + drm_printf(&p, "Offset \t Value\n"); + + while (j < ring->ring_size) { + drm_printf(&p, "0x%x \t 0x%x\n", j, ring->ring[j / 4]); + j += 4; + } + } + + if (coredump->reset_vram_lost) + drm_printf(&p, "VRAM is lost due to GPU reset!\n"); + if (coredump->adev->reset_info.num_regs) { + drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); + + for (i = 0; i < coredump->adev->reset_info.num_regs; i++) + drm_printf(&p, "0x%08x: 0x%08x\n", + coredump->adev->reset_info.reset_dump_reg_list[i], + coredump->adev->reset_info.reset_dump_reg_value[i]); + } + + return count - iter.remain; +} + +static void amdgpu_devcoredump_free(void *data) +{ + kfree(data); +} + +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context) +{ + struct amdgpu_coredump_info *coredump; + struct drm_device *dev = adev_to_drm(adev); + struct amdgpu_job *job = reset_context->job; + struct drm_sched_job *s_job; + + coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT); + + if (!coredump) { + DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__); + return; + } + + coredump->reset_vram_lost = vram_lost; + + if (reset_context->job && reset_context->job->vm) { + struct amdgpu_task_info *ti; + struct amdgpu_vm *vm = reset_context->job->vm; + + ti = amdgpu_vm_get_task_info_vm(vm); + if (ti) { + coredump->reset_task_info = *ti; + amdgpu_vm_put_task_info(ti); + } + } + + if (job) { + s_job = &job->base; + coredump->ring = to_amdgpu_ring(s_job->sched); + } + + coredump->adev = adev; + + ktime_get_ts64(&coredump->reset_time); + + dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT, + amdgpu_devcoredump_read, amdgpu_devcoredump_free); +} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h new file mode 100644 index 000000000000..52459512cb2b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __AMDGPU_DEV_COREDUMP_H__ +#define __AMDGPU_DEV_COREDUMP_H__ + +#include "amdgpu.h" +#include "amdgpu_reset.h" + +#ifdef CONFIG_DEV_COREDUMP + +#define AMDGPU_COREDUMP_VERSION "1" + +struct amdgpu_coredump_info { + struct amdgpu_device *adev; + struct amdgpu_task_info reset_task_info; + struct timespec64 reset_time; + bool reset_vram_lost; + struct amdgpu_ring *ring; +}; +#endif + +void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, + struct amdgpu_reset_context *reset_context); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3204b8f6edeb..f771b2042a43 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -74,6 +74,7 @@ #include "amdgpu_fru_eeprom.h" #include "amdgpu_reset.h" #include "amdgpu_virt.h" +#include "amdgpu_dev_coredump.h" #include #include diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 3398f2a368d5..ea4873f6ccd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -21,50 +21,11 @@ * */ -#include -#include - #include "amdgpu_reset.h" #include "aldebaran.h" #include "sienna_cichlid.h" #include "smu_v13_0_10.h" -const char *hw_ip_names[MAX_HWIP] = { - [GC_HWIP] = "GC", - [HDP_HWIP] = "HDP", - [SDMA0_HWIP] = "SDMA0", - [SDMA1_HWIP] = "SDMA1", - [SDMA2_HWIP] = "SDMA2", - [SDMA3_HWIP] = "SDMA3", - [SDMA4_HWIP] = "SDMA4", - [SDMA5_HWIP] = "SDMA5", - [SDMA6_HWIP] = "SDMA6", - [SDMA7_HWIP] = "SDMA7", - [LSDMA_HWIP] = "LSDMA", - [MMHUB_HWIP] = "MMHUB", - [ATHUB_HWIP] = "ATHUB", - [NBIO_HWIP] = "NBIO", - [MP0_HWIP] = "MP0", - [MP1_HWIP] = "MP1", - [UVD_HWIP] = "UVD/JPEG/VCN", - [VCN1_HWIP] = "VCN1", - [VCE_HWIP] = "VCE", - [VPE_HWIP] = "VPE", - [DF_HWIP] = "DF", - [DCE_HWIP] = "DCE", - [OSSSYS_HWIP] = "OSSSYS", - [SMUIO_HWIP] = "SMUIO", - [PWR_HWIP] = "PWR", - [NBIF_HWIP] = "NBIF", - [THM_HWIP] = "THM", - [CLK_HWIP] = "CLK", - [UMC_HWIP] = "UMC", - [RSMU_HWIP] = "RSMU", - [XGMI_HWIP] = "XGMI", - [DCI_HWIP] = "DCI", - [PCIE_HWIP] = "PCIE", -}; - int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; @@ -197,155 +158,3 @@ void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain) atomic_set(&reset_domain->in_gpu_reset, 0); up_write(&reset_domain->sem); } - -#ifndef CONFIG_DEV_COREDUMP -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context) -{ -} -#else -static ssize_t -amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, - void *data, size_t datalen) -{ - struct drm_printer p; - struct amdgpu_coredump_info *coredump = data; - struct drm_print_iterator iter; - struct amdgpu_vm_fault_info *fault_info; - int i, ver; - - iter.data = buffer; - iter.offset = 0; - iter.start = offset; - iter.remain = count; - - p = drm_coredump_printer(&iter); - - drm_printf(&p, "**** AMDGPU Device Coredump ****\n"); - drm_printf(&p, "version: " 
AMDGPU_COREDUMP_VERSION "\n"); - drm_printf(&p, "kernel: " UTS_RELEASE "\n"); - drm_printf(&p, "module: " KBUILD_MODNAME "\n"); - drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec, - coredump->reset_time.tv_nsec); - - if (coredump->reset_task_info.pid) - drm_printf(&p, "process_name: %s PID: %d\n", - coredump->reset_task_info.process_name, - coredump->reset_task_info.pid); - - /* GPU IP's information of the SOC */ - drm_printf(&p, "\nIP Information\n"); - drm_printf(&p, "SOC Family: %d\n", coredump->adev->family); - drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id); - drm_printf(&p, "SOC External Revision id: %d\n", coredump->adev->external_rev_id); - - for (int i = 1; i < MAX_HWIP; i++) { - for (int j = 0; j < HWIP_MAX_INSTANCE; j++) { - ver = coredump->adev->ip_versions[i][j]; - if (ver) - drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n", - hw_ip_names[i], i, j, - IP_VERSION_MAJ(ver), - IP_VERSION_MIN(ver), - IP_VERSION_REV(ver), - IP_VERSION_VARIANT(ver), - IP_VERSION_SUBREV(ver)); - } - } - - if (coredump->ring) { - drm_printf(&p, "\nRing timed out details\n"); - drm_printf(&p, "IP Type: %d Ring Name: %s\n", - coredump->ring->funcs->type, - coredump->ring->name); - } - - /* Add page fault information */ - fault_info = &coredump->adev->vm_manager.fault_info; - drm_printf(&p, "\n[%s] Page fault observed\n", - fault_info->vmhub ? "mmhub" : "gfxhub"); - drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", fault_info->addr); - drm_printf(&p, "Protection fault status register: 0x%x\n\n", fault_info->status); - - /* Add ring buffer information */ - drm_printf(&p, "Ring buffer information\n"); - for (int i = 0; i < coredump->adev->num_rings; i++) { - int j = 0; - struct amdgpu_ring *ring = coredump->adev->rings[i]; - - drm_printf(&p, "ring name: %s\n", ring->name); - drm_printf(&p, "Rptr: 0x%llx Wptr: 0x%llx RB mask: %x\n", - amdgpu_ring_get_rptr(ring), - amdgpu_ring_get_wptr(ring), - ring->buf_mask); - drm_printf(&p, "Ring size in dwords: %d\n", - ring->ring_size / 4); - drm_printf(&p, "Ring contents\n"); - drm_printf(&p, "Offset \t Value\n"); - - while (j < ring->ring_size) { - drm_printf(&p, "0x%x \t 0x%x\n", j, ring->ring[j/4]); - j += 4; - } - } - - if (coredump->reset_vram_lost) - drm_printf(&p, "VRAM is lost due to GPU reset!\n"); - if (coredump->adev->reset_info.num_regs) { - drm_printf(&p, "AMDGPU register dumps:\nOffset: Value:\n"); - - for (i = 0; i < coredump->adev->reset_info.num_regs; i++) - drm_printf(&p, "0x%08x: 0x%08x\n", - coredump->adev->reset_info.reset_dump_reg_list[i], - coredump->adev->reset_info.reset_dump_reg_value[i]); - } - - return count - iter.remain; -} - -static void amdgpu_devcoredump_free(void *data) -{ - kfree(data); -} - -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context) -{ - struct amdgpu_coredump_info *coredump; - struct drm_device *dev = adev_to_drm(adev); - struct amdgpu_job *job = reset_context->job; - struct drm_sched_job *s_job; - - coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT); - - if (!coredump) { - DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__); - return; - } - - coredump->reset_vram_lost = vram_lost; - - if (reset_context->job && reset_context->job->vm) { - struct amdgpu_task_info *ti; - struct amdgpu_vm *vm = reset_context->job->vm; - - ti = amdgpu_vm_get_task_info_vm(vm); - if (ti) { - coredump->reset_task_info = *ti; - amdgpu_vm_put_task_info(ti); - } - } - - if (job) { - s_job = &job->base; - coredump->ring = 
to_amdgpu_ring(s_job->sched); - } - - coredump->adev = adev; - - ktime_get_ts64(&coredump->reset_time); - - dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT, - amdgpu_devcoredump_read, amdgpu_devcoredump_free); -} -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 60522963aaca..66125d43cf21 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -88,19 +88,6 @@ struct amdgpu_reset_domain { atomic_t reset_res; }; -#ifdef CONFIG_DEV_COREDUMP - -#define AMDGPU_COREDUMP_VERSION "1" - -struct amdgpu_coredump_info { - struct amdgpu_device *adev; - struct amdgpu_task_info reset_task_info; - struct timespec64 reset_time; - bool reset_vram_lost; - struct amdgpu_ring *ring; -}; -#endif - int amdgpu_reset_init(struct amdgpu_device *adev); int amdgpu_reset_fini(struct amdgpu_device *adev); @@ -141,9 +128,6 @@ void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain); void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain); -void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, - struct amdgpu_reset_context *reset_context); - #define for_each_handler(i, handler, reset_ctl) \ for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \ (handler = (*reset_ctl->reset_handlers)[i]); \ -- cgit v1.2.3-70-g09d2 From acf760c890b3a2bf0501c0710e2d5a839c0e57d7 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Wed, 20 Mar 2024 15:53:47 -0400 Subject: drm/amdkfd: fix TLB flush after unmap for GFX9.4.2 TLB flush after unmap accidentially was removed on gfx9.4.2. It is to add it back. Signed-off-by: Eric Huang Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 42d40560cd30..a81ef232fdef 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1473,7 +1473,7 @@ static inline void kfd_flush_tlb(struct kfd_process_device *pdd, static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) { - return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) || + return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 2) || (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) || KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0); } -- cgit v1.2.3-70-g09d2 From f88a7dd06ab435f8c07dc9b84a003f321c08cd72 Mon Sep 17 00:00:00 2001 From: Shashank Sharma Date: Thu, 21 Mar 2024 15:21:00 +0100 Subject: drm/amdgpu: Add a NULL check for freeing root PT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds a NULL check to fix this crash reported during the freeing of root PT entry: BUG: unable to handle page fault for address: ffffc9002d637aa0 #PF: supervisor write access in kernel mode #PF: error_code(0x0002) - not-present page RIP: 0010:amdgpu_vm_pt_free+0x66/0xe0 [amdgpu] PKRU: 55555554 Call Trace: amdgpu_vm_pt_free_root+0x60/0xa0 [amdgpu] amdgpu_vm_fini+0x2cb/0x5d0 [amdgpu] ? 
amdgpu_ctx_mgr_entity_fini+0x53/0x1c0 [amdgpu] amdgpu_driver_postclose_kms+0x191/0x2d0 [amdgpu] drm_file_free.part.0+0x1e5/0x260 [drm] Cc: Christian König Cc: Alex Deucher Cc: Felix Kuehling Cc: Rajneesh Bhardwaj Acked-by: Christian König Signed-off-by: Shashank Sharma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index cbe0ae4c8738..7fdd306a48a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -688,8 +688,10 @@ void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct amdgpu_vm_pt_cursor cursor; struct amdgpu_vm_bo_base *entry; - for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) - amdgpu_vm_pt_free(entry); + for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) { + if (entry) + amdgpu_vm_pt_free(entry); + } } /** -- cgit v1.2.3-70-g09d2 From 5daa29473cf63aa973aa3fcaf8c9a50ada359a45 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 12 Mar 2024 11:21:32 -0400 Subject: Revert "drm/amd/display: Fix sending VSC (+ colorimetry) packets for DP/eDP displays without PSR" This causes flicker on a bunch of eDP panels. The info_packet code also caused regressions on other OSes that we haven't' seen on Linux yet, but that is likely due to the fact that we haven't had a chance to test those environments on Linux. We'll need to revisit this. This reverts commit 202260f64519e591b5cd99626e441b6559f571a3. Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3207 Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3151 Signed-off-by: Harry Wentland Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 +++----- .../gpu/drm/amd/display/modules/info_packet/info_packet.c | 13 +++++-------- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2851719d7121..71d2d44681b2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6305,9 +6305,8 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || - stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || - stream->signal == SIGNAL_TYPE_EDP) { + + if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet @@ -6323,9 +6322,8 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); + aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; - if (stream->link->psr_settings.psr_feature_enabled) - aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } finish: dc_sink_release(sink); diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index bb9f39bf5b5b..a344e2e49b0e 100644 --- 
a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } /* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */ - if (stream->link->psr_settings.psr_feature_enabled) { - if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) - vsc_packet_revision = vsc_packet_rev4; - else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) - vsc_packet_revision = vsc_packet_rev2; - } - - if (stream->link->replay_settings.config.replay_supported) + if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) + vsc_packet_revision = vsc_packet_rev4; + else if (stream->link->replay_settings.config.replay_supported) vsc_packet_revision = vsc_packet_rev4; + else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) + vsc_packet_revision = vsc_packet_rev2; /* Update to revision 5 for extended colorimetry support */ if (stream->use_vsc_sdp_for_colorimetry) -- cgit v1.2.3-70-g09d2 From fb880635e08f28e18cba28db1f6e11bd4bb8828e Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 1 Mar 2024 12:25:17 -0500 Subject: drm/amdkfd: range check cp bad op exception interrupts Due to a CP interrupt bug, bad packet garbage exception codes are raised. Do a range check so that the debugger and runtime do not receive garbage codes. Update the user api to guard exception code type checking as well. Signed-off-by: Jonathan Kim Tested-by: Jesse Zhang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 3 ++- include/uapi/linux/kfd_ioctl.h | 17 ++++++++++++++--- 4 files changed, 20 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c index 740f89aafbc0..8e0d0356e810 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -342,7 +342,8 @@ static void event_interrupt_wq_v10(struct kfd_node *dev, break; } kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23); - } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE && + KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) { kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_DEBUG_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index d3d6b5c180b3..f524a55eee11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -329,7 +329,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, /* CP */ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) kfd_signal_event_interrupt(pasid, context_id0, 32); - else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE && + KFD_DBG_EC_TYPE_IS_PACKET(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0))) kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_CTXID0_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 2a37ab7a7150..c368c70df3f4 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -398,7 +398,8 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, break; } kfd_signal_event_interrupt(pasid, sq_int_data, 24); - } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE && + KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) { kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_DEBUG_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 9ce46edc62a5..2040a470ddb4 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -913,14 +913,25 @@ enum kfd_dbg_trap_exception_code { KFD_EC_MASK(EC_DEVICE_NEW)) #define KFD_EC_MASK_PROCESS (KFD_EC_MASK(EC_PROCESS_RUNTIME) | \ KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE)) +#define KFD_EC_MASK_PACKET (KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) | \ + KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED)) /* Checks for exception code types for KFD search */ +#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX) #define KFD_DBG_EC_TYPE_IS_QUEUE(ecode) \ - (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE)) + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE)) #define KFD_DBG_EC_TYPE_IS_DEVICE(ecode) \ - (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE)) + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE)) #define KFD_DBG_EC_TYPE_IS_PROCESS(ecode) \ - (!!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS)) + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS)) +#define KFD_DBG_EC_TYPE_IS_PACKET(ecode) \ + (KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET)) /* Runtime enable states */ -- cgit v1.2.3-70-g09d2 From 417f78a2a1c8c2d517db8b2e04785c1c94a563b4 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 20 Mar 2024 18:43:14 -0400 Subject: drm/amdkfd: Cleanup workqueue during module unload Destroy the high priority workqueue that handles interrupts during KFD node cleanup. Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index dd3c43c1ad70..9b6b6e882593 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -104,6 +104,8 @@ void kfd_interrupt_exit(struct kfd_node *node) */ flush_workqueue(node->ih_wq); + destroy_workqueue(node->ih_wq); + kfifo_free(&node->ih_fifo); } -- cgit v1.2.3-70-g09d2 From 8b3495eafb4ddd4e1963834ce9a56c729128d62e Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 20 Mar 2024 13:57:29 +0800 Subject: drm/amdgpu: add socket id parameter for psp query address cmd And set the socket id. 
Signed-off-by: Tao Zhou Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 1 + drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 14 +++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 056d4df8fa1f..3ac56a9645eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -146,6 +146,7 @@ struct ta_ras_mca_addr { uint32_t ch_inst; uint32_t umc_inst; uint32_t node_inst; + uint32_t socket_id; }; struct ta_ras_phy_addr { diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 5f08d3dcf174..6a52a9f0dc30 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -268,7 +268,7 @@ static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev, static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst, - uint32_t node_inst) + uint32_t node_inst, uint32_t socket_id) { uint32_t col, row, row_xor, bank, channel_index; uint64_t soc_pa, retired_page, column; @@ -280,6 +280,7 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, addr_in.ma.ch_inst = ch_inst; addr_in.ma.umc_inst = umc_inst; addr_in.ma.node_inst = node_inst; + addr_in.ma.socket_id = socket_id; if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) /* fallback to old path if fail to get pa from psp */ @@ -331,6 +332,7 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data = (struct ras_err_data *)data; uint64_t umc_reg_offset = get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); + uint32_t socket_id = 0; mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); @@ -357,8 +359,13 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev, err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + if (!adev->aid_mask && + adev->smuio.funcs && + adev->smuio.funcs->get_socket_id) + socket_id = adev->smuio.funcs->get_socket_id(adev); + umc_v12_0_convert_error_address(adev, err_data, err_addr, - ch_inst, umc_inst, node_inst); + ch_inst, umc_inst, node_inst, socket_id); } /* clear umc status */ @@ -456,7 +463,8 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade err_data, err_addr, MCA_IPID_LO_2_UMC_CH(InstanceIdLo), MCA_IPID_LO_2_UMC_INST(InstanceIdLo), - err_info->mcm_info.die_id); + err_info->mcm_info.die_id, + err_info->mcm_info.socket_id); } /* Delete error address node from list and free memory */ -- cgit v1.2.3-70-g09d2 From 7c2bc34ab926447f1b329a57669adfce76eafc3c Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Wed, 20 Mar 2024 14:13:54 +0530 Subject: drm/amdgpu: Fix format character cut-off issues in amdgpu_vcn_early_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reducing the size of ucode_prefix to 25 in the amdgpu_vcn_early_init function. This would ensure that the total number of characters being written into fw_name does not exceed its size of 40. 
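The arithmetic can be checked with a small stand-alone C sketch (the prefix string below is a hypothetical value chosen for illustration; the buffer sizes match the patch): with a 25-byte ucode_prefix the worst case is 7 ("amdgpu/") + 24 (prefix) + 4 (".bin") + 1 (NUL) = 36 bytes, which provably fits the 40-byte destination, so gcc can no longer construct a truncating case.

#include <stdio.h>

int main(void)
{
	char ucode_prefix[25] = "vcn_4_0"; /* hypothetical prefix for illustration */
	char fw_name[40];

	/* Worst case: 7 + 24 + 4 + 1 = 36 <= sizeof(fw_name), so
	 * -Wformat-truncation at W=1 has nothing left to warn about. */
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
	printf("%s\n", fw_name);
	return 0;
}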
Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c: In function ‘amdgpu_vcn_early_init’: drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:102:66: warning: ‘snprintf’ output may be truncated before the last format character [-Wformat-truncation=] 102 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:102:17: note: ‘snprintf’ output between 12 and 41 bytes into a destination of size 40 102 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:102:66: warning: ‘snprintf’ output may be truncated before the last format character [-Wformat-truncation=] 102 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:102:17: note: ‘snprintf’ output between 12 and 41 bytes into a destination of size 40 102 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:105:73: warning: ‘.bin’ directive output may be truncated writing 4 bytes into a region of size between 2 and 31 [-Wformat-truncation=] 105 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i); | ^~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c:105:25: note: ‘snprintf’ output between 14 and 43 bytes into a destination of size 40 105 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cc: Alex Deucher Cc: Christian König Suggested-by: Lijo Lazar Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 9c514a606a2f..bb85772b1374 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -93,7 +93,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work); int amdgpu_vcn_early_init(struct amdgpu_device *adev) { - char ucode_prefix[30]; + char ucode_prefix[25]; char fw_name[40]; int r, i; -- cgit v1.2.3-70-g09d2 From 20fd14460f45a01b9ec63aa7b12e6c3c66e54fa7 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 21 Mar 2024 11:05:55 +0530 Subject: drm/amdgpu: Fix 'fw_name' buffer size to prevent truncations in amdgpu_mes_init_microcode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The snprintf function is used to write a formatted string into fw_name. The format of the string is "amdgpu/%s_mes%s.bin", where %s is replaced by the string in ucode_prefix and the second %s is replaced by either "_2" or "1" depending on the condition pipe == AMDGPU_MES_SCHED_PIPE. The length of the string "amdgpu/%s_mes%s.bin" is 16 characters plus the length of ucode_prefix and the length of the string "_2" or "1". The size of ucode_prefix is 30, so the maximum length of ucode_prefix is 29 characters (since one character is needed for the null terminator). Therefore, the maximum possible length of the string written into fw_name is 16 + 29 + 2 = 47 characters. 
The size of fw_name is 40, so if the length of the string written into fw_name is more than 39 characters (since one character is needed for the null terminator), it will be truncated by the snprintf function, and thus warnings will be seen. By increasing the size of fw_name to 50, we ensure that fw_name is large enough to hold the maximum possible length of the string, so the snprintf function will not truncate the output. Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c: In function ‘amdgpu_mes_init_microcode’: drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1482:66: warning: ‘%s’ directive output may be truncated writing up to 1 bytes into a region of size between 0 and 29 [-Wformat-truncation=] 1482 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", | ^~ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1482:17: note: ‘snprintf’ output between 16 and 46 bytes into a destination of size 40 1482 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1483 | ucode_prefix, | ~~~~~~~~~~~~~ 1484 | pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1"); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1477:66: warning: ‘%s’ directive output may be truncated writing 1 byte into a region of size between 0 and 29 [-Wformat-truncation=] 1477 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", | ^~ 1478 | ucode_prefix, 1479 | pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1"); | ~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1477:17: note: ‘snprintf’ output between 17 and 46 bytes into a destination of size 40 1477 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1478 | ucode_prefix, | ~~~~~~~~~~~~~ 1479 | pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1"); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1477:66: warning: ‘%s’ directive output may be truncated writing 2 bytes into a region of size between 0 and 29 [-Wformat-truncation=] 1477 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", | ^~ 1478 | ucode_prefix, 1479 | pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1"); | ~~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1477:17: note: ‘snprintf’ output between 18 and 47 bytes into a destination of size 40 1477 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin", | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1478 | ucode_prefix, | ~~~~~~~~~~~~~ 1479 | pipe == AMDGPU_MES_SCHED_PIPE ? 
"_2" : "1"); | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1489:62: warning: ‘_mes.bin’ directive output may be truncated writing 8 bytes into a region of size between 4 and 33 [-Wformat-truncation=] 1489 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", | ^~~~~~~~ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c:1489:17: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 40 1489 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin", | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1490 | ucode_prefix); | ~~~~~~~~~~~~~ Cc: Alex Deucher Cc: Christian König Suggested-by: Lijo Lazar Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index eb75c524b4a6..e4fcc950e542 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -1467,7 +1467,7 @@ int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe) const struct mes_firmware_header_v1_0 *mes_hdr; struct amdgpu_firmware_info *info; char ucode_prefix[30]; - char fw_name[40]; + char fw_name[50]; bool need_retry = false; int r; -- cgit v1.2.3-70-g09d2 From 927a8a800ebbea623c82ab9cc72b2827c4035aa9 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 22 Mar 2024 12:18:37 +0530 Subject: drm/amdgpu: Fix truncation in gfx_v10_0_init_microcode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The total size of the fw_name buffer is 8 (for "amdgpu/") + 30 (for ucode_prefix) + 5 (for "_pfp") + 5 (for "_wks") + 5 (for ".bin") = 53 characters. 
Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c: In function ‘gfx_v10_0_early_init’: drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:3982:58: warning: ‘%s’ directive output may be truncated writing up to 4 bytes into a region of size between 0 and 29 [-Wformat-truncation=] 3982 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", ucode_prefix, wks); | ^~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:3982:9: note: ‘snprintf’ output between 16 and 49 bytes into a destination of size 40 3982 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", ucode_prefix, wks); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:3988:57: warning: ‘%s’ directive output may be truncated writing up to 4 bytes into a region of size between 1 and 30 [-Wformat-truncation=] 3988 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", ucode_prefix, wks); | ^~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:3988:9: note: ‘snprintf’ output between 15 and 48 bytes into a destination of size 40 3988 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", ucode_prefix, wks); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:3994:57: warning: ‘%s’ directive output may be truncated writing up to 4 bytes into a region of size between 1 and 30 [-Wformat-truncation=] 3994 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", ucode_prefix, wks); | ^~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:3994:9: note: ‘snprintf’ output between 15 and 48 bytes into a destination of size 40 3994 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", ucode_prefix, wks); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4001:62: warning: ‘_rlc.bin’ directive output may be truncated writing 8 bytes into a region of size between 4 and 33 [-Wformat-truncation=] 4001 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); | ^~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4001:17: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 40 4001 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4017:58: warning: ‘%s’ directive output may be truncated writing up to 4 bytes into a region of size between 0 and 29 [-Wformat-truncation=] 4017 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", ucode_prefix, wks); | ^~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4017:9: note: ‘snprintf’ output between 16 and 49 bytes into a destination of size 40 4017 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", ucode_prefix, wks); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4024:54: warning: ‘_mec2’ directive output may be truncated writing 5 bytes into a region of size between 4 and 33 [-Wformat-truncation=] 4024 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", ucode_prefix, wks); | ^~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c:4024:9: note: ‘snprintf’ output between 17 and 50 bytes into a destination of size 40 4024 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", ucode_prefix, wks); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cc: Alex Deucher Cc: Christian König Suggested-by: Lijo Lazar Signed-off-by: Srinivasan Shanmugam Reviewed-by: 
Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index f90905ef32c7..d524f1a353ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3964,7 +3964,7 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) static int gfx_v10_0_init_microcode(struct amdgpu_device *adev) { - char fw_name[40]; + char fw_name[53]; char ucode_prefix[30]; const char *wks = ""; int err; -- cgit v1.2.3-70-g09d2 From 539ff12ee5e423d6a17d746ed5cca0e1b8c38feb Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Thu, 21 Mar 2024 16:46:01 +0530 Subject: drm/amdgpu: Fix truncation issues in gfx_v9_0.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The size of fw_name is increased to ensure that it can accommodate the maximum possible size of the string being written into it. Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c: In function ‘gfx_v9_0_early_init’: drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1255:52: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1255 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); | ^~ ...... 1393 | r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1255:9: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 30 1255 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1261:52: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1261 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); | ^~ ...... 1393 | r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1261:9: note: ‘snprintf’ output between 15 and 44 bytes into a destination of size 30 1261 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1267:52: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1267 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); | ^~ ...... 1393 | r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1267:9: note: ‘snprintf’ output between 15 and 44 bytes into a destination of size 30 1267 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1303:60: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1303 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name); | ^~ ...... 
1398 | r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1303:17: note: ‘snprintf’ output between 20 and 49 bytes into a destination of size 30 1303 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1309:60: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1309 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name); | ^~ ...... 1398 | r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1309:17: note: ‘snprintf’ output between 23 and 52 bytes into a destination of size 30 1309 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1311:60: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1311 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); | ^~ ...... 1398 | r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1311:17: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 30 1311 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1344:60: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1344 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name); | ^~ ...... 1402 | r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1344:17: note: ‘snprintf’ output between 20 and 49 bytes into a destination of size 30 1344 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1346:60: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1346 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); | ^~ ...... 1402 | r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1346:17: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 30 1346 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1356:68: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1356 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name); | ^~ ...... 
1402 | r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1356:25: note: ‘snprintf’ output between 21 and 50 bytes into a destination of size 30 1356 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sjt_mec2.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1358:68: warning: ‘%s’ directive output may be truncated writing up to 29 bytes into a region of size 23 [-Wformat-truncation=] 1358 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); | ^~ ...... 1402 | r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix); | ~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c:1358:25: note: ‘snprintf’ output between 17 and 46 bytes into a destination of size 30 1358 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cc: Alex Deucher Cc: Christian König Suggested-by: Lijo Lazar Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6f97a6d0e6d0..71b555993b7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1249,7 +1249,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev, char *chip_name) { - char fw_name[30]; + char fw_name[50]; int err; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name); @@ -1282,7 +1282,7 @@ out: static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev, char *chip_name) { - char fw_name[30]; + char fw_name[53]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -1337,7 +1337,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev) static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, char *chip_name) { - char fw_name[30]; + char fw_name[50]; int err; if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_ALDEBARAN)) -- cgit v1.2.3-70-g09d2 From 8e4617c25d53e1b40472bb802898a773832cc5d4 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 21 Mar 2024 11:14:12 +0800 Subject: drm/amdgpu: simplify convert_error_address interface for UMC v12 Replace separate parameters with struct ta_ras_query_address_input. 
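This is the usual parameter-object refactor: once the inputs travel in one struct, adding a field (such as the socket id introduced earlier in this series) no longer touches every prototype and call site. A stripped-down, stand-alone sketch of the shape, using a hypothetical struct and values rather than the real ta_ras_query_address_input:

#include <stdint.h>
#include <stdio.h>

struct addr_query_in {          /* stand-in for ta_ras_query_address_input */
	uint64_t err_addr;
	uint32_t ch_inst;
	uint32_t umc_inst;
	uint32_t node_inst;
	uint32_t socket_id;     /* a newly added field rides along for free */
};

static void convert_error_address(const struct addr_query_in *in)
{
	/* consumes all inputs through the single parameter object */
	printf("addr=0x%llx ch=%u umc=%u node=%u socket=%u\n",
	       (unsigned long long)in->err_addr, in->ch_inst,
	       in->umc_inst, in->node_inst, in->socket_id);
}

int main(void)
{
	struct addr_query_in in = {
		.err_addr = 0x1000, .ch_inst = 2, .umc_inst = 0,
		.node_inst = 1, .socket_id = 0,
	};

	convert_error_address(&in);
	return 0;
}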
Signed-off-by: Tao Zhou Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 57 ++++++++++++++++++---------------- 1 file changed, 30 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index 6a52a9f0dc30..f46a176f9b55 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -266,26 +266,19 @@ static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev, } static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, - struct ras_err_data *err_data, uint64_t err_addr, - uint32_t ch_inst, uint32_t umc_inst, - uint32_t node_inst, uint32_t socket_id) + struct ras_err_data *err_data, + struct ta_ras_query_address_input *addr_in) { uint32_t col, row, row_xor, bank, channel_index; - uint64_t soc_pa, retired_page, column; - struct ta_ras_query_address_input addr_in; + uint64_t soc_pa, retired_page, column, err_addr; struct ta_ras_query_address_output addr_out; - addr_in.addr_type = TA_RAS_MCA_TO_PA; - addr_in.ma.err_addr = err_addr; - addr_in.ma.ch_inst = ch_inst; - addr_in.ma.umc_inst = umc_inst; - addr_in.ma.node_inst = node_inst; - addr_in.ma.socket_id = socket_id; - - if (psp_ras_query_address(&adev->psp, &addr_in, &addr_out)) + err_addr = addr_in->ma.err_addr; + addr_in->addr_type = TA_RAS_MCA_TO_PA; + if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) /* fallback to old path if fail to get pa from psp */ - umc_v12_0_mca_addr_to_pa(adev, err_addr, ch_inst, umc_inst, - node_inst, &addr_out); + umc_v12_0_mca_addr_to_pa(adev, err_addr, addr_in->ma.ch_inst, + addr_in->ma.umc_inst, addr_in->ma.node_inst, &addr_out); soc_pa = addr_out.pa.pa; bank = addr_out.pa.bank; @@ -310,7 +303,7 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", retired_page, row, col, bank, channel_index); amdgpu_umc_fill_error_record(err_data, err_addr, - retired_page, channel_index, umc_inst); + retired_page, channel_index, addr_in->ma.umc_inst); /* shift R13 bit */ retired_page ^= (0x1ULL << UMC_V12_0_PA_R13_BIT); @@ -318,7 +311,7 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, "Error Address(PA):0x%-10llx Row:0x%-4x Col:0x%-2x Bank:0x%x Channel:0x%x\n", retired_page, row_xor, col, bank, channel_index); amdgpu_umc_fill_error_record(err_data, err_addr, - retired_page, channel_index, umc_inst); + retired_page, channel_index, addr_in->ma.umc_inst); } } @@ -326,13 +319,13 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev, uint32_t node_inst, uint32_t umc_inst, uint32_t ch_inst, void *data) { + struct ras_err_data *err_data = (struct ras_err_data *)data; + struct ta_ras_query_address_input addr_in; uint64_t mc_umc_status_addr; uint64_t mc_umc_status, err_addr; uint64_t mc_umc_addrt0; - struct ras_err_data *err_data = (struct ras_err_data *)data; uint64_t umc_reg_offset = get_umc_v12_0_reg_offset(adev, node_inst, umc_inst, ch_inst); - uint32_t socket_id = 0; mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); @@ -362,10 +355,16 @@ static int umc_v12_0_query_error_address(struct amdgpu_device *adev, if (!adev->aid_mask && adev->smuio.funcs && adev->smuio.funcs->get_socket_id) - socket_id = adev->smuio.funcs->get_socket_id(adev); + addr_in.ma.socket_id = adev->smuio.funcs->get_socket_id(adev); + else + addr_in.ma.socket_id = 0; + + 
addr_in.ma.err_addr = err_addr; + addr_in.ma.ch_inst = ch_inst; + addr_in.ma.umc_inst = umc_inst; + addr_in.ma.node_inst = node_inst; - umc_v12_0_convert_error_address(adev, err_data, err_addr, - ch_inst, umc_inst, node_inst, socket_id); + umc_v12_0_convert_error_address(adev, err_data, &addr_in); } /* clear umc status */ @@ -431,12 +430,16 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade struct ras_err_info *err_info; struct ras_err_addr *mca_err_addr, *tmp; struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + struct ta_ras_query_address_input addr_in; for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; if (list_empty(&err_info->err_addr_list)) continue; + addr_in.ma.node_inst = err_info->mcm_info.die_id; + addr_in.ma.socket_id = err_info->mcm_info.socket_id; + list_for_each_entry_safe(mca_err_addr, tmp, &err_info->err_addr_list, node) { mc_umc_status = mca_err_addr->err_status; if (mc_umc_status && @@ -452,6 +455,10 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); InstanceIdLo = REG_GET_FIELD(mca_ipid, MCMP1_IPIDT0, InstanceIdLo); + addr_in.ma.err_addr = err_addr; + addr_in.ma.ch_inst = MCA_IPID_LO_2_UMC_CH(InstanceIdLo); + addr_in.ma.umc_inst = MCA_IPID_LO_2_UMC_INST(InstanceIdLo); + dev_info(adev->dev, "UMC:IPID:0x%llx, aid:%d, inst:%d, ch:%d, err_addr:0x%llx\n", mca_ipid, err_info->mcm_info.die_id, @@ -460,11 +467,7 @@ static void umc_v12_0_ecc_info_query_ras_error_address(struct amdgpu_device *ade err_addr); umc_v12_0_convert_error_address(adev, - err_data, err_addr, - MCA_IPID_LO_2_UMC_CH(InstanceIdLo), - MCA_IPID_LO_2_UMC_INST(InstanceIdLo), - err_info->mcm_info.die_id, - err_info->mcm_info.socket_id); + err_data, &addr_in); } /* Delete error address node from list and free memory */ -- cgit v1.2.3-70-g09d2 From 730dd50f84b51664e1c13258c21413e49f93394d Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 22 Mar 2024 12:32:47 +0530 Subject: drm/amdgpu: Fix truncation in smu_v11_0_init_microcode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reducing the size of ucode_prefix to 25 in the smu_v11_0_init_microcode function. 
we ensure that fw_name can accommodate the maximum possible string size Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu11/smu_v11_0.c: In function ‘smu_v11_0_init_microcode’: drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu11/smu_v11_0.c:110:54: warning: ‘.bin’ directive output may be truncated writing 4 bytes into a region of size between 0 and 29 [-Wformat-truncation=] 110 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^~~~ drivers/gpu/drm/amd/amdgpu/../pm/swsmu/smu11/smu_v11_0.c:110:9: note: ‘snprintf’ output between 12 and 41 bytes into a destination of size 36 110 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Cc: Alex Deucher Cc: Christian König Suggested-by: Lijo Lazar Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index f6545093bfc1..5e5da9b16718 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu) int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[30]; + char ucode_prefix[25]; char fw_name[SMU_FW_NAME_LEN]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; -- cgit v1.2.3-70-g09d2 From eb4f6eca2632584b8260c175e8aac41ed36314be Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Fri, 22 Mar 2024 12:00:31 +0530 Subject: drm/amdgpu: Fix truncations in gfx_v11_0_init_microcode() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reducing the size of ucode_prefix to 25 in the gfx_v11_0_init_microcode function. This would ensure that the total number of characters being written into fw_name does not exceed its size of 40. 
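As a rough worked check of that sizing (a standalone sketch, not code from this patch; the "gc_11_0_0" prefix is only an example value): "amdgpu/" is 7 characters, the prefix is at most 24 plus its terminating NUL, and the longest suffix such as "_pfp.bin" is 8, so the formatted name fits exactly in a 40-byte buffer.

    #include <stdio.h>	/* snprintf, for this standalone sketch */

    #define UCODE_PREFIX_LEN 25	/* 24 characters + terminating NUL */
    #define FW_NAME_LEN      40

    int main(void)
    {
    	char ucode_prefix[UCODE_PREFIX_LEN] = "gc_11_0_0";	/* example prefix */
    	char fw_name[FW_NAME_LEN];

    	/* Worst case: 7 ("amdgpu/") + 24 (prefix) + 8 ("_pfp.bin") + 1 (NUL) = 40 */
    	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
    	printf("%s\n", fw_name);
    	return 0;
    }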
Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c: In function ‘gfx_v11_0_early_init’: drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:523:54: warning: ‘_pfp.bin’ directive output may be truncated writing 8 bytes into a region of size between 4 and 33 [-Wformat-truncation=] 523 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix); | ^~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:523:9: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 40 523 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:540:54: warning: ‘_me.bin’ directive output may be truncated writing 7 bytes into a region of size between 4 and 33 [-Wformat-truncation=] 540 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix); | ^~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:540:9: note: ‘snprintf’ output between 15 and 44 bytes into a destination of size 40 540 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:557:70: warning: ‘_rlc.bin’ directive output may be truncated writing 8 bytes into a region of size between 4 and 33 [-Wformat-truncation=] 557 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); | ^~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:557:25: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 40 557 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:569:54: warning: ‘_mec.bin’ directive output may be truncated writing 8 bytes into a region of size between 4 and 33 [-Wformat-truncation=] 569 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); | ^~~~~~~~ drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c:569:9: note: ‘snprintf’ output between 16 and 45 bytes into a destination of size 40 569 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CC [M] drivers/gpu/drm/amd/amdgpu/../pm/powerplay/hwmgr/smu7_clockpowergating.o Cc: Alex Deucher Cc: Christian König Suggested-by: Lijo Lazar Signed-off-by: Srinivasan Shanmugam Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 1770e496c1b7..7a906318e451 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -510,7 +510,7 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev) static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) { char fw_name[40]; - char ucode_prefix[30]; + char ucode_prefix[25]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; -- cgit v1.2.3-70-g09d2 From 0355b24bdec3b69ba31375c83d94fa80ca2c7ae1 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 20 Mar 2024 13:32:21 -0500 Subject: drm/amd: Flush GFXOFF requests in prepare stage If the system hasn't entered GFXOFF when suspend starts it can cause hangs accessing GC and RLC during the suspend stage. 
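A minimal sketch of the ordering the fix enforces is below; flush_delayed_work() and the gfx_off_delay_work member come from the change itself, while the surrounding function shape is assumed for illustration only:

    /* Sketch: drain any pending GFXOFF request before the IP blocks
     * run their prepare callbacks, so later GC/RLC register access
     * does not race with the engine powering down.
     */
    static int prepare_stage_sketch(struct amdgpu_device *adev)
    {
    	int i;

    	flush_delayed_work(&adev->gfx.gfx_off_delay_work);

    	for (i = 0; i < adev->num_ip_blocks; i++) {
    		if (!adev->ip_blocks[i].status.valid)
    			continue;
    		/* per-IP prepare/suspend work happens here */
    	}
    	return 0;
    }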
Cc: # 6.1.y: 5095d5418193 ("drm/amd: Evict resources during PM ops prepare() callback") Cc: # 6.1.y: cb11ca3233aa ("drm/amd: Add concept of running prepare_suspend() sequence for IP blocks") Cc: # 6.1.y: 2ceec37b0e3d ("drm/amd: Add missing kernel doc for prepare_suspend()") Cc: # 6.1.y: 3a9626c816db ("drm/amd: Stop evicting resources on APUs in suspend") Cc: # 6.6.y: 5095d5418193 ("drm/amd: Evict resources during PM ops prepare() callback") Cc: # 6.6.y: cb11ca3233aa ("drm/amd: Add concept of running prepare_suspend() sequence for IP blocks") Cc: # 6.6.y: 2ceec37b0e3d ("drm/amd: Add missing kernel doc for prepare_suspend()") Cc: # 6.6.y: 3a9626c816db ("drm/amd: Stop evicting resources on APUs in suspend") Cc: # 6.1+ Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3132 Fixes: ab4750332dbe ("drm/amdgpu/sdma5.2: add begin/end_use ring callbacks") Reviewed-by: Alex Deucher Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f771b2042a43..12dc71a6b5db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4542,6 +4542,8 @@ int amdgpu_device_prepare(struct drm_device *dev) if (r) goto unprepare; + flush_delayed_work(&adev->gfx.gfx_off_delay_work); + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; -- cgit v1.2.3-70-g09d2 From e37f5bd8cbdc5f20ff86c13686b1a2b8f0f5bdc6 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Mon, 12 Feb 2024 14:43:08 -0500 Subject: drm/amd/display: Allow idle opts for no flip case on PSR panel [Why & How] There is a corner case where a single PSR panel fails to enter idle optimizations if the panel is not flipping (no planes or DPMS_OFF == true). This is because the panel will not enter PSR if it's not flipping, but this will prevent the FW idle opt path from being executed. To handle this case we will allow entry to idle opt from driver side even when a PSR panel is connected under the following scenarios: 1. Only a single PSR panel is connected 2. 
PSR panel is not flipping Reviewed-by: Samson Tam Acked-by: Tom Chung Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index c0b526cf1786..a5e92389615f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -261,7 +261,9 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable) for (i = 0; i < dc->current_state->stream_count; i++) { /* MALL SS messaging is not supported with PSR at this time */ if (dc->current_state->streams[i] != NULL && - dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) + dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && + (dc->current_state->stream_count > 1 || (!dc->current_state->streams[i]->dpms_off && + dc->current_state->stream_status[i].plane_count > 0))) return false; } -- cgit v1.2.3-70-g09d2 From aca8a9b127ea8cae92a71b6e351232e3b9c2130a Mon Sep 17 00:00:00 2001 From: Hamza Mahfooz Date: Thu, 21 Mar 2024 16:09:21 -0400 Subject: drm/amd/display: fix IPX enablement We need to re-enable idle power optimizations after entering PSR. Since, we get kicked out of idle power optimizations before entering PSR (entering PSR requires us to write to DCN registers, which isn't allowed while we are in IPS). Fixes: a9b1a4f684b3 ("drm/amd/display: Add more checks for exiting idle in DC") Tested-by: Mark Broadworth Reviewed-by: Nicholas Kazlauskas Signed-off-by: Hamza Mahfooz Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8 +++++--- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index a48a79e84e82..bfa090432ce2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) * amdgpu_dm_psr_enable() - enable psr f/w * @stream: stream state * - * Return: true if success */ -bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +void amdgpu_dm_psr_enable(struct dc_stream_state *stream) { struct dc_link *link = stream->link; unsigned int vsync_rate_hz = 0; @@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) power_opt |= psr_power_opt_z10_static_screen; - return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); + + if (link->ctx->dc->caps.ips_support) + dc_allow_idle_optimizations(link->ctx->dc, true); } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h index 6806b3c9c84b..1fdfd183c0d9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h @@ -32,7 +32,7 @@ #define AMDGPU_DM_PSR_ENTRY_DELAY 5 void amdgpu_dm_set_psr_caps(struct dc_link *link); -bool 
amdgpu_dm_psr_enable(struct dc_stream_state *stream); +void amdgpu_dm_psr_enable(struct dc_stream_state *stream); bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); -- cgit v1.2.3-70-g09d2 From be524af47a82f48022c093ad7992b5eb501877b9 Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Wed, 21 Feb 2024 16:47:06 -0500 Subject: drm/amd/display: Update dcn351 to latest dcn35 config [why & how] There were some fixes in dcn35 that need to be ported over to dcn351 to prevent any regression. Signed-off-by: Sung Joon Kim Reviewed-by: Liu, Xi (Alex) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 9 ++++++--- drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 2 +- .../gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 11 ++++++++--- 3 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index b6246406a042..b3ffab77cf88 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -402,6 +402,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc, clock_limits[i].socclk_mhz; dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio; + dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = + clock_limits[i].dtbclk_mhz; dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = clk_table->num_entries; dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = @@ -414,6 +416,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc, clk_table->num_entries; dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = clk_table->num_entries; + dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = + clk_table->num_entries; } } @@ -613,6 +617,7 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context) if (context->res_ctx.pipe_ctx[i].plane_state) plane_count++; } + /*dcn351 does not support z9/z10*/ if (context->stream_count == 0 || plane_count == 0) { support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; @@ -626,11 +631,9 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context) dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000; bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; - /*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/ - if (is_pwrseq0 && (is_psr || is_replay)) + if (is_pwrseq0 && (is_psr || is_replay)) support = allow_z8 ? 
allow_z8 : DCN_ZSTATE_SUPPORT_DISALLOW; - } context->bw_ctx.bw.dcn.clk.zstate_support = support; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index ab17fa1c64e8..670255c9bc82 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .prepare_bandwidth = dcn35_prepare_bandwidth, .optimize_bandwidth = dcn35_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, - .set_drr = dcn10_set_drr, + .set_drr = dcn35_set_drr, .get_position = dcn10_get_position, .set_static_screen_control = dcn35_set_static_screen_control, .setup_stereo = dcn10_setup_stereo, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 2dfd73d5f767..fe13c891879b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -700,6 +700,8 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, + .disable_optc_power_gate = true, /*should the same as above two*/ + .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/ .disable_clock_gate = false, .disable_dsc_power_gate = true, .vsr_support = true, @@ -742,12 +744,13 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ + .minimum_z8_residency_time = 2100, .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, - .disable_idle_power_optimizations = true, + .disable_idle_power_optimizations = false, .dmcub_emulation = false, .disable_boot_optimizations = false, .disable_unbounded_requesting = false, @@ -758,8 +761,10 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - .ips2_entry_delay_us = 400 + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, + .disable_dmub_reallow_idle = true, + .static_screen_wait_frames = 2, }; static const struct dc_panel_config panel_config_defaults = { -- cgit v1.2.3-70-g09d2 From 865d38e690c664f08b26f75731223cc80ec589e8 Mon Sep 17 00:00:00 2001 From: Natanel Roizenman Date: Fri, 8 Mar 2024 12:41:04 -0500 Subject: drm/amd/display: Consolidate HPO enable/disable and restrict only to state transitions. [WHY] Previously, we'd disabled HPO whenever an HPO display was disconnected. This caused other HPO displays to blank whenever one was unplugged. [HOW] This change restricts HPO enable/disable to dce110_apply_ctx_to_hw and adds a helper function (dce110_is_hpo_enabled) that returns true if any HPO displays are present in a context. We compare the current and previous dc ctx to check whether HPO is transitioning from on to off or vice versa, and adjust the HPO state accordingly. 
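For reference, a minimal sketch of the transition check described above (names taken from the diff below, with the rest of dce110_apply_ctx_to_hw() elided):

    bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state);
    bool is_hpo_enabled  = dce110_is_hpo_enabled(context);

    /* Toggle HPO HW control only when the overall HPO state changes,
     * so removing one HPO display no longer blanks the remaining ones.
     */
    if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled)
    	dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled);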
Reviewed-by: Wenjing Liu Reviewed-by: Chris Park Acked-by: Tom Chung Signed-off-by: Natanel Roizenman Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 29 ++++++++++++++-------- .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 5 ---- 2 files changed, 19 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 0ba1feaf96c0..de5542c20103 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1192,16 +1192,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); } - - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - /* TODO: This looks like a bug to me as we are disabling HPO IO when - * we are just disabling a single HPO stream. Shouldn't we disable HPO - * HW control only when HPOs for all streams are disabled? - */ - if (pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control) - pipe_ctx->stream->ctx->dc->hwseq->funcs.setup_hpo_hw_control( - pipe_ctx->stream->ctx->dc->hwseq, false); - } } void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, @@ -2288,6 +2278,19 @@ static void dce110_setup_audio_dto( } } +static bool dce110_is_hpo_enabled(struct dc_state *context) +{ + int i; + + for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) { + if (context->res_ctx.is_hpo_dp_stream_enc_acquired[i]) { + return true; + } + } + + return false; +} + enum dc_status dce110_apply_ctx_to_hw( struct dc *dc, struct dc_state *context) @@ -2296,6 +2299,8 @@ enum dc_status dce110_apply_ctx_to_hw( struct dc_bios *dcb = dc->ctx->dc_bios; enum dc_status status; int i; + bool was_hpo_enabled = dce110_is_hpo_enabled(dc->current_state); + bool is_hpo_enabled = dce110_is_hpo_enabled(context); /* reset syncd pipes from disabled pipes */ if (dc->config.use_pipe_ctx_sync_logic) @@ -2338,6 +2343,10 @@ enum dc_status dce110_apply_ctx_to_hw( dce110_setup_audio_dto(dc, context); + if (dc->hwseq->funcs.setup_hpo_hw_control && was_hpo_enabled != is_hpo_enabled) { + dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, is_hpo_enabled); + } + for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i]; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 8b3536c380b8..eae71f448143 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -2892,11 +2892,6 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - if (dc->hwseq->funcs.setup_hpo_hw_control) - dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true); - } - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { dto_params.otg_inst = tg->inst; dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; -- cgit v1.2.3-70-g09d2 From 1ba65e749dc607d36a0b6e54f14171d52702247d Mon Sep 17 00:00:00 2001 From: Taimur Hassan Date: Thu, 29 Feb 2024 09:52:05 -0500 Subject: drm/amd/display: Send DTBCLK disable message on first commit [Why] Previous patch to allow 
DTBCLK disable didn't address boot case. Driver thinks DTBCLK is disabled by default, so we don't send disable message to PMFW. DTBCLK is then enabled at idle desktop on boot, burning power. [How] Set dtbclk_en to true on boot so that disable message is sent during first commit. Fixes: 27750e176a4f ("drm/amd/display: Allow DTBCLK disable for DCN35") Reviewed-by: Charlene Liu Acked-by: Tom Chung Signed-off-by: Taimur Hassan Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index c6030bed95a0..dafa07671d00 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -73,6 +73,8 @@ #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 + #define REG(reg_name) \ (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) @@ -412,9 +414,12 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs static void init_clk_states(struct clk_mgr *clk_mgr) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD) + clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; -- cgit v1.2.3-70-g09d2 From fbc836cdbf9feaf15e84025c4ee52f425e861446 Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Tue, 12 Mar 2024 11:15:59 -0400 Subject: drm/amd/display: Remove read/write to external register [why&how] We need to remove the reference to these registers to prevent any usage in the future. 
Reviewed-by: Nicholas Kazlauskas Acked-by: Tom Chung Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 38 ---------------------- .../drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 21 ------------ .../drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h | 2 -- .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 16 --------- .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 3 -- .../gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 2 -- .../drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 2 -- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 2 -- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2 -- 9 files changed, 88 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index dafa07671d00..928d61f9f858 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -842,35 +842,6 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base) } } -static void dcn35_set_ips_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr); - - if (dc->config.disable_ips == DMUB_IPS_ENABLE || - dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { - val = val & ~DMUB_IPS1_ALLOW_MASK; - val = val & ~DMUB_IPS2_ALLOW_MASK; - } - - if (!allow_idle) { - val |= DMUB_IPS1_ALLOW_MASK; - val |= DMUB_IPS2_ALLOW_MASK; - } - - dcn35_smu_write_ips_scratch(clk_mgr, val); -} - static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -890,13 +861,6 @@ static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base) return ips_supported; } -static uint32_t dcn35_get_ips_idle_state(struct clk_mgr *clk_mgr_base) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - - return dcn35_smu_read_ips_scratch(clk_mgr); -} - static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr) { init_clk_states(clk_mgr); @@ -984,8 +948,6 @@ static struct clk_mgr_funcs dcn35_funcs = { .set_low_power_state = dcn35_set_low_power_state, .exit_low_power_state = dcn35_exit_low_power_state, .is_ips_supported = dcn35_is_ips_supported, - .set_idle_state = dcn35_set_ips_idle_state, - .get_idle_state = dcn35_get_ips_idle_state }; struct clk_mgr_funcs dcn35_fpga_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c index 9e588c56c570..1399b41dfd1c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c @@ -487,24 +487,3 @@ int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr) //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv); return retv; } - -void dcn35_smu_write_ips_scratch(struct 
clk_mgr_internal *clk_mgr, uint32_t param) -{ - if (!clk_mgr->smu_present) - return; - - REG_WRITE(MP1_SMN_C2PMSG_71, param); - //smu_print("%s: write_ips_scratch = %x\n", __func__, param); -} - -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr) -{ - uint32_t retv; - - if (!clk_mgr->smu_present) - return 0; - - retv = REG_READ(MP1_SMN_C2PMSG_71); - //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv); - return retv; -} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h index 2b8e6959a03d..06cd3cc6d36e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h @@ -198,6 +198,4 @@ int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr); int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr); -void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param); -uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_35_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 2fb90c2330a8..66b162fe4700 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1336,22 +1336,6 @@ void dcn35_optimize_bandwidth( } } -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle) -{ - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->set_idle_state) - dc->clk_mgr->funcs->set_idle_state(dc->clk_mgr, allow_idle); -} - -uint32_t dcn35_get_idle_state(const struct dc *dc) -{ - // TODO: Find a more suitable communcation - if (dc->clk_mgr->funcs->get_idle_state) - return dc->clk_mgr->funcs->get_idle_state(dc->clk_mgr); - - return 0; -} - void dcn35_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, struct dc_crtc_timing_adjust adjust) { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 7c33bea3f4e7..68f714143431 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -84,9 +84,6 @@ void dcn35_dsc_pg_control( unsigned int dsc_inst, bool power_on); -void dcn35_set_idle_state(const struct dc *dc, bool allow_idle); -uint32_t dcn35_get_idle_state(const struct dc *dc); - void dcn35_set_drr(struct pipe_ctx **pipe_ctx, int num_pipes, struct dc_crtc_timing_adjust adjust); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index dce620d359a6..19b861349954 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -121,8 +121,6 @@ static const struct hw_sequencer_funcs dcn35_funcs = { .hw_block_power_up = dcn35_hw_block_power_up, .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state, .set_long_vtotal = dcn35_set_long_vblank, }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index 670255c9bc82..b5f4bae16177 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -120,8 +120,6 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .hw_block_power_up = dcn35_hw_block_power_up, .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, - .set_idle_state = dcn35_set_idle_state, - .get_idle_state = dcn35_get_idle_state }; static const struct hwseq_private_funcs dcn351_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 26ea86316e50..65df90842b83 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -424,8 +424,6 @@ struct hw_sequencer_funcs { struct pg_block_update *update_state); void (*root_clock_control)(struct dc *dc, struct pg_block_update *update_state, bool power_on); - void (*set_idle_state)(const struct dc *dc, bool allow_idle); - uint32_t (*get_idle_state)(const struct dc *dc); bool (*is_pipe_topology_transition_seamless)(struct dc *dc, const struct dc_state *cur_ctx, const struct dc_state *new_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 17e014d3bdc8..4f7480f60c85 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -281,8 +281,6 @@ struct clk_mgr_funcs { void (*set_low_power_state)(struct clk_mgr *clk_mgr); void (*exit_low_power_state)(struct clk_mgr *clk_mgr); bool (*is_ips_supported)(struct clk_mgr *clk_mgr); - void (*set_idle_state)(struct clk_mgr *clk_mgr, bool allow_idle); - uint32_t (*get_idle_state)(struct clk_mgr *clk_mgr); void (*init_clocks)(struct clk_mgr *clk_mgr); -- cgit v1.2.3-70-g09d2 From 8cffa89bd5e2827fefb0740a8375274230ad4c5f Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Thu, 22 Feb 2024 20:38:25 -0500 Subject: drm/amd/display: Expand DML2 callbacks [Why&How] These additional callbacks to DC will be required for the DML2 wrapper. Also consolidate common callbacks for projects to a single location for maintenance. 
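A short sketch of the consolidation pattern (a fragment assumed to sit in a DCN resource_construct() path; the helper and callback names appear in the diff below): the shared callback set is seeded once, then only project-specific hooks are assigned on top of it.

    /* Seed the common DML2 callbacks in one place... */
    resource_init_common_dml2_callbacks(dc, &dc->dml2_options);

    /* ...then override or add only what this ASIC family needs. */
    dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch =
    	&dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch;
    dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;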
Reviewed-by: Chaitanya Dhere Acked-by: Tom Chung Signed-off-by: Dillon Varone Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 33 +++++++++++++++++ drivers/gpu/drm/amd/display/dc/core/dc_state.c | 17 ++++++++- drivers/gpu/drm/amd/display/dc/dc_state.h | 2 +- drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 2 ++ .../amd/display/dc/dcn32/dcn32_resource_helpers.c | 23 ++---------- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 2 +- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 24 ++++++++++++- .../drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 4 ++- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 3 ++ drivers/gpu/drm/amd/display/dc/inc/resource.h | 6 ++++ .../amd/display/dc/resource/dcn32/dcn32_resource.c | 41 ++++++++++------------ .../amd/display/dc/resource/dcn32/dcn32_resource.h | 6 ++-- .../display/dc/resource/dcn321/dcn321_resource.c | 25 ++----------- .../amd/display/dc/resource/dcn35/dcn35_resource.c | 10 ++---- .../display/dc/resource/dcn351/dcn351_resource.c | 10 ++---- 15 files changed, 119 insertions(+), 89 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 96b4f68ec374..ce56e47cfd0a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -4993,3 +4993,36 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st return false; } + +void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options) +{ + dml2_options->callbacks.dc = dc; + dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params; + dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; + dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; + dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; + dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dml2_options->callbacks.get_opp_head = &resource_get_opp_head; + dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream; + dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master; + dml2_options->callbacks.get_dpp_pipes_for_plane = &resource_get_dpp_pipes_for_plane; + dml2_options->callbacks.get_stream_status = &dc_state_get_stream_status; + dml2_options->callbacks.get_stream_from_id = &dc_state_get_stream_from_id; + + dml2_options->svp_pstate.callbacks.dc = dc; + dml2_options->svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; + dml2_options->svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; + dml2_options->svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; + dml2_options->svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; + dml2_options->svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; + dml2_options->svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; + dml2_options->svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; + dml2_options->svp_pstate.callbacks.release_phantom_plane = 
&dc_state_release_phantom_plane; + dml2_options->svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; + dml2_options->svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; + dml2_options->svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; + dml2_options->svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + dml2_options->svp_pstate.callbacks.remove_phantom_streams_and_planes = &dc_state_remove_phantom_streams_and_planes; + dml2_options->svp_pstate.callbacks.release_phantom_streams_and_planes = &dc_state_release_phantom_streams_and_planes; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index cce4e1c465b6..c5c078771b15 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -585,7 +585,7 @@ bool dc_state_add_all_planes_for_stream( */ struct dc_stream_status *dc_state_get_stream_status( struct dc_state *state, - struct dc_stream_state *stream) + const struct dc_stream_state *stream) { uint8_t i; @@ -868,3 +868,18 @@ void dc_state_release_phantom_streams_and_planes( for (i = 0; i < state->phantom_plane_count; i++) dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]); } + +struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id) +{ + struct dc_stream_state *stream = NULL; + int i; + + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i] && state->streams[i]->stream_id == id) { + stream = state->streams[i]; + break; + } + } + + return stream; +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h index d167fdbfa8a9..631d1a57263c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -74,5 +74,5 @@ bool dc_state_add_all_planes_for_stream( struct dc_stream_status *dc_state_get_stream_status( struct dc_state *state, - struct dc_stream_state *stream); + const struct dc_stream_state *stream); #endif /* _DC_STATE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h index c1f44e09a6c1..4da4babf0d18 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -29,6 +29,8 @@ #include "dc_state.h" #include "dc_stream.h" +struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state, unsigned int id); + /* Get the type of the provided resource (none, phantom, main) based on the provided * context. If the context is unavailable, determine only if phantom or not. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index f98def6c8c2d..c831f6c69a17 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -35,25 +35,6 @@ static bool is_dual_plane(enum surface_pixel_format format) return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; } - -uint32_t dcn32_helper_mall_bytes_to_ways( - struct dc *dc, - uint32_t total_size_in_mall_bytes) -{ - uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways; - - /* add 2 lines for worst case alignment */ - cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; - - total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; - lines_per_way = total_cache_lines / dc->caps.cache_num_ways; - num_ways = cache_lines_used / lines_per_way; - if (cache_lines_used % lines_per_way > 0) - num_ways++; - - return num_ways; -} - uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -112,8 +93,10 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp( if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) { if (dc->debug.force_subvp_num_ways) { return dc->debug.force_subvp_num_ways; + } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { + return dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); } else { - return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes); + return 0; } } else { return 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 72cca367062e..018cf97c0251 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -681,7 +681,7 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate) +bool dml2_validate(struct dc *in_dc, struct dc_state *context, bool fast_validate) { bool out = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index cc662d682fd4..800d2ce4856c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -88,6 +88,19 @@ struct dml2_dc_callbacks { int (*get_odm_slice_index)(const struct pipe_ctx *opp_head); int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe); struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx); + struct pipe_ctx *(*get_otg_master_for_stream)( + struct resource_context *res_ctx, + const struct dc_stream_state *stream); + int (*get_opp_heads_for_otg_master)(const struct pipe_ctx *otg_master, + struct resource_context *res_ctx, + struct pipe_ctx *opp_heads[MAX_PIPES]); + int (*get_dpp_pipes_for_plane)(const struct dc_plane_state *plane, + struct resource_context *res_ctx, + struct pipe_ctx *dpp_pipes[MAX_PIPES]); + struct dc_stream_status *(*get_stream_status)( + struct dc_state *state, + const struct dc_stream_state *stream); + struct dc_stream_state *(*get_stream_from_id)(const struct dc_state *state, unsigned int id); }; struct dml2_dc_svp_callbacks { @@ -121,6 +134,15 @@ struct dml2_dc_svp_callbacks { enum mall_stream_type (*get_pipe_subvp_type)(const struct 
dc_state *state, const struct pipe_ctx *pipe_ctx); enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream); struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream); + bool (*remove_phantom_streams_and_planes)( + struct dc *dc, + struct dc_state *state); + void (*release_phantom_streams_and_planes)( + struct dc *dc, + struct dc_state *state); + unsigned int (*calculate_mall_ways_from_bytes)( + const struct dc *dc, + unsigned int total_size_in_mall_bytes); }; struct dml2_clks_table_entry { @@ -242,7 +264,7 @@ void dml2_reinit(const struct dc *in_dc, * separate dc_states for validation. * Return: True if mode is supported, false otherwise. */ -bool dml2_validate(const struct dc *in_dc, +bool dml2_validate(struct dc *in_dc, struct dc_state *context, bool fast_validate); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index a5e92389615f..367dcaeaf186 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -239,8 +239,10 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c // Convert number of cache lines required to number of ways if (dc->debug.force_mall_ss_num_ways > 0) { num_ways = dc->debug.force_mall_ss_num_ways; + } else if (dc->res_pool->funcs->calculate_mall_ways_from_bytes) { + num_ways = dc->res_pool->funcs->calculate_mall_ways_from_bytes(dc, mall_ss_size_bytes); } else { - num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes); + num_ways = 0; } return num_ways; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 34764094f546..a66676c00c6c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -90,6 +90,9 @@ struct resource_funcs { void (*update_soc_for_wm_a)( struct dc *dc, struct dc_state *context); + unsigned int (*calculate_mall_ways_from_bytes)( + const struct dc *dc, + unsigned int total_size_in_mall_bytes); /** * @populate_dml_pipes - Populate pipe data struct * diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 77a60aa9f27b..d17fa73d72a3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -615,4 +615,10 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc, struct pipe_ctx *pipe_ctx); bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream); + +/* Setup dc callbacks for dml2 + * @dc: the display core structure + * @dml2_options: struct to hold callbacks + */ +void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuration_options *dml2_options); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 1c657fe4a9bb..da15ad845147 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1912,6 +1912,22 @@ int dcn32_populate_dml_pipes_from_context( return pipe_cnt; } +unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes) +{ + uint32_t 
cache_lines_used, lines_per_way, total_cache_lines, num_ways; + + /* add 2 lines for worst case alignment */ + cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2; + + total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size; + lines_per_way = total_cache_lines / dc->caps.cache_num_ways; + num_ways = cache_lines_used / lines_per_way; + if (cache_lines_used % lines_per_way > 0) + num_ways++; + + return num_ways; +} + static struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn20_get_dcc_compression_cap, .get_subvp_en = dcn32_subvp_in_use, @@ -1960,6 +1976,7 @@ static struct resource_funcs dcn32_res_pool_funcs = { .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, + .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -2363,30 +2380,10 @@ static bool dcn32_resource_construct( dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; - dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = 
dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 2258c5c7212d..286e20ad46ed 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -113,10 +113,6 @@ void dcn32_calculate_wm_and_dlg( int pipe_cnt, int vlevel); -uint32_t dcn32_helper_mall_bytes_to_ways( - struct dc *dc, - uint32_t total_size_in_mall_bytes); - uint32_t dcn32_helper_calculate_mall_bytes_for_cursor( struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -184,6 +180,8 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); +unsigned int dcn32_calculate_mall_ways_from_bytes(const struct dc *dc, unsigned int total_size_in_mall_bytes); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 6c8129734163..f7339b141939 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1610,6 +1610,7 @@ static struct resource_funcs dcn321_res_pool_funcs = { .update_soc_for_wm_a = dcn30_update_soc_for_wm_a, .add_phantom_pipes = dcn32_add_phantom_pipes, .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params, + .calculate_mall_ways_from_bytes = dcn32_calculate_mall_ways_from_bytes, }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -2000,30 +2001,10 @@ static bool dcn321_resource_construct( dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; - - dc->dml2_options.svp_pstate.callbacks.dc = dc; - dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params; - dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream; - 
dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream; - dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane; - dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream; dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc; - dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type; - dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream; + dc->dml2_options.svp_pstate.callbacks.calculate_mall_ways_from_bytes = pool->base.funcs->calculate_mall_ways_from_bytes; dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us; dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 49cccf3c0f9c..91c6eff79282 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -2138,15 +2138,9 @@ static bool dcn35_resource_construct( dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index fe13c891879b..07ac9f3130b7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -2121,15 +2121,9 @@ static bool dcn351_resource_construct( dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm; - dc->dml2_options.callbacks.dc = dc; - dc->dml2_options.callbacks.build_scaling_params = &resource_build_scaling_params; + resource_init_common_dml2_callbacks(dc, &dc->dml2_options); dc->dml2_options.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch = &dcn30_can_support_mclk_switch_using_fw_based_vblank_stretch; - dc->dml2_options.callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; - 
dc->dml2_options.callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; - dc->dml2_options.callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; - dc->dml2_options.callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; - dc->dml2_options.callbacks.get_odm_slice_index = &resource_get_odm_slice_index; - dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head; + dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ -- cgit v1.2.3-70-g09d2 From 9d43241953f729626a7d452417a2832092a9ec1d Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Mon, 11 Mar 2024 12:37:57 -0400 Subject: drm/amd/display: Refactor DML2 interfaces [Why&How} Some interfaces needed changes to support future architectures. Reviewed-by: Chaitanya Dhere Acked-by: Tom Chung Signed-off-by: Dillon Varone Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 14 ++++++------- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2 +- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dc_plane.h | 2 +- drivers/gpu/drm/amd/display/dc/dc_state.h | 4 ++-- drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 10 +++++----- .../amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 23 ++++++++++++---------- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 2 +- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 12 +++++------ 9 files changed, 37 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index c5c078771b15..d546ea71026d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -340,7 +340,7 @@ void dc_state_release(struct dc_state *state) * dc_state_add_stream() - Add a new dc_stream_state to a dc_state. */ enum dc_status dc_state_add_stream( - struct dc *dc, + const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream) { @@ -369,7 +369,7 @@ enum dc_status dc_state_add_stream( * dc_state_remove_stream() - Remove a stream from a dc_state. 
*/ enum dc_status dc_state_remove_stream( - struct dc *dc, + const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream) { @@ -679,7 +679,7 @@ void dc_state_release_phantom_stream(const struct dc *dc, dc_stream_release(phantom_stream); } -struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, +struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc, struct dc_state *state, struct dc_plane_state *main_plane) { @@ -715,7 +715,7 @@ void dc_state_release_phantom_plane(const struct dc *dc, } /* add phantom streams to context and generate correct meta inside dc_state */ -enum dc_status dc_state_add_phantom_stream(struct dc *dc, +enum dc_status dc_state_add_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream, struct dc_stream_state *main_stream) @@ -741,7 +741,7 @@ enum dc_status dc_state_add_phantom_stream(struct dc *dc, return res; } -enum dc_status dc_state_remove_phantom_stream(struct dc *dc, +enum dc_status dc_state_remove_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream) { @@ -835,7 +835,7 @@ bool dc_state_add_all_phantom_planes_for_stream( } bool dc_state_remove_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state) { int i; @@ -857,7 +857,7 @@ bool dc_state_remove_phantom_streams_and_planes( } void dc_state_release_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 19140fb65787..acc7a8baa169 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -103,7 +103,7 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, /*register_flip_interrupt(surface);*/ } -struct dc_plane_state *dc_create_plane_state(struct dc *dc) +struct dc_plane_state *dc_create_plane_state(const struct dc *dc) { struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index e0b44c43e959..52fd2ebdcdbb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -996,6 +996,7 @@ struct dc_debug_options { bool enable_idle_reg_checks; unsigned int static_screen_wait_frames; bool force_chroma_subsampling_1tap; + bool disable_422_left_edge_pixel; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h index ef380cae816a..44afcd989224 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h @@ -29,7 +29,7 @@ #include "dc.h" #include "dc_hw_types.h" -struct dc_plane_state *dc_create_plane_state(struct dc *dc); +struct dc_plane_state *dc_create_plane_state(const struct dc *dc); const struct dc_plane_status *dc_plane_get_status( const struct dc_plane_state *plane_state); void dc_plane_state_retain(struct dc_plane_state *plane_state); diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h index 631d1a57263c..a8979c9defe4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -39,12 +39,12 @@ void dc_state_destruct(struct dc_state *state); void dc_state_retain(struct dc_state *state); void dc_state_release(struct dc_state *state); -enum 
dc_status dc_state_add_stream(struct dc *dc, +enum dc_status dc_state_add_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream); enum dc_status dc_state_remove_stream( - struct dc *dc, + const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream); diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h index 4da4babf0d18..615086d74d32 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h @@ -47,7 +47,7 @@ struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state * struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *main_stream); -struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc, +struct dc_plane_state *dc_state_create_phantom_plane(const struct dc *dc, struct dc_state *state, struct dc_plane_state *main_plane); @@ -60,11 +60,11 @@ void dc_state_release_phantom_plane(const struct dc *dc, struct dc_plane_state *phantom_plane); /* add/remove phantom stream to context and generate subvp meta data */ -enum dc_status dc_state_add_phantom_stream(struct dc *dc, +enum dc_status dc_state_add_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream, struct dc_stream_state *main_stream); -enum dc_status dc_state_remove_phantom_stream(struct dc *dc, +enum dc_status dc_state_remove_phantom_stream(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream); @@ -94,11 +94,11 @@ bool dc_state_add_all_phantom_planes_for_stream( struct dc_state *state); bool dc_state_remove_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state); void dc_state_release_phantom_streams_and_planes( - struct dc *dc, + const struct dc *dc, struct dc_state *state); #endif /* _DC_STATE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index a52c594e1ba4..b64e0160d482 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -794,7 +794,7 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state } static unsigned int get_mpc_factor(struct dml2_context *ctx, - const struct dc_state *state, + struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_stream_status *status, @@ -805,10 +805,10 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, unsigned int cfg_idx; unsigned int mpc_factor; - get_plane_id(ctx, state, status->plane_states[plane_idx], - stream->stream_id, plane_idx, &plane_id); - cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); if (ctx->architecture == dml2_architecture_20) { + get_plane_id(ctx, state, status->plane_states[plane_idx], + stream->stream_id, plane_idx, &plane_id); + cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id); mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx]; } else { mpc_factor = 1; @@ -824,14 +824,16 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, static unsigned int get_odm_factor( const struct dml2_context *ctx, + struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_stream_state *stream) { - unsigned int cfg_idx = 
find_disp_cfg_idx_by_stream_id( - mapping, stream->stream_id); + unsigned int cfg_idx; - if (ctx->architecture == dml2_architecture_20) + if (ctx->architecture == dml2_architecture_20) { + cfg_idx = find_disp_cfg_idx_by_stream_id( + mapping, stream->stream_id); switch (disp_cfg->hw.ODMMode[cfg_idx]) { case dml_odm_mode_bypass: return 1; @@ -842,6 +844,7 @@ static unsigned int get_odm_factor( default: break; } + } ASSERT(false); return 1; } @@ -850,7 +853,7 @@ static void populate_mpc_factors_for_stream( struct dml2_context *ctx, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_state *state, + struct dc_state *state, unsigned int stream_idx, unsigned int odm_factor, unsigned int mpc_factors[MAX_PIPES]) @@ -870,14 +873,14 @@ static void populate_mpc_factors_for_stream( static void populate_odm_factors(const struct dml2_context *ctx, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, - const struct dc_state *state, + struct dc_state *state, unsigned int odm_factors[MAX_PIPES]) { int i; for (i = 0; i < state->stream_count; i++) odm_factors[i] = get_odm_factor( - ctx, disp_cfg, mapping, state->streams[i]); + ctx, state, disp_cfg, mapping, state->streams[i]); } static bool map_dc_pipes_for_stream(struct dml2_context *ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 018cf97c0251..72cca367062e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -681,7 +681,7 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(struct dc *in_dc, struct dc_state *context, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate) { bool out = false; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 800d2ce4856c..6e97337863e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -109,10 +109,10 @@ struct dml2_dc_svp_callbacks { struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc, struct dc_state *state, struct dc_stream_state *main_stream); - struct dc_plane_state* (*create_phantom_plane)(struct dc *dc, + struct dc_plane_state* (*create_phantom_plane)(const struct dc *dc, struct dc_state *state, struct dc_plane_state *main_plane); - enum dc_status (*add_phantom_stream)(struct dc *dc, + enum dc_status (*add_phantom_stream)(const struct dc *dc, struct dc_state *state, struct dc_stream_state *phantom_stream, struct dc_stream_state *main_stream); @@ -121,7 +121,7 @@ struct dml2_dc_svp_callbacks { struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context); - enum dc_status (*remove_phantom_stream)(struct dc *dc, + enum dc_status (*remove_phantom_stream)(const struct dc *dc, struct dc_state *state, struct dc_stream_state *stream); void (*release_phantom_plane)(const struct dc *dc, @@ -135,10 +135,10 @@ struct dml2_dc_svp_callbacks { enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream); struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream); bool (*remove_phantom_streams_and_planes)( - struct dc *dc, + const struct dc *dc, struct dc_state *state); void 
(*release_phantom_streams_and_planes)( - struct dc *dc, + const struct dc *dc, struct dc_state *state); unsigned int (*calculate_mall_ways_from_bytes)( const struct dc *dc, @@ -264,7 +264,7 @@ void dml2_reinit(const struct dc *in_dc, * separate dc_states for validation. * Return: True if mode is supported, false otherwise. */ -bool dml2_validate(struct dc *in_dc, +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate); -- cgit v1.2.3-70-g09d2 From e4067957363742c762eb68041367e5ea9a465f60 Mon Sep 17 00:00:00 2001 From: Sohaib Nadeem Date: Tue, 12 Mar 2024 17:32:28 -0400 Subject: drm/amd/display: Added missing null checks [why&how] Add the missing null check before dereferencing dc_stream_status* Fixes: 2d5bb791e24f ("drm/amd/display: Implement update_planes_and_stream_v3 sequence") Reviewed-by: Josip Pavic Acked-by: Tom Chung Signed-off-by: Sohaib Nadeem Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 7222917e48bb..9030251388db 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -4291,6 +4291,9 @@ static int initialize_empty_surface_updates( struct dc_stream_status *status = dc_stream_get_status(stream); int i; + if (!status) + return 0; + for (i = 0; i < status->plane_count; i++) srf_updates[i].surface = status->plane_states[i]; -- cgit v1.2.3-70-g09d2 From 02367f52901932674ff2a9c5208b6c11ccf39802 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 12 Mar 2024 15:31:59 -0400 Subject: drm/amd/display: fix a dereference of a NULL pointer [why&how] On some platforms out_transfer_func may not be populated. We need to check for null before dereferencing it. Fixes: d2dea1f14038 ("drm/amd/display: Generalize new minimal transition path") Reviewed-by: Alvin Lee Acked-by: Tom Chung Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 9030251388db..8d042651b6fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3057,7 +3057,8 @@ static void backup_planes_and_stream_state( scratch->blend_tf[i] = *status->plane_states[i]->blend_tf; } scratch->stream_state = *stream; - scratch->out_transfer_func = *stream->out_transfer_func; + if (stream->out_transfer_func) + scratch->out_transfer_func = *stream->out_transfer_func; } static void restore_planes_and_stream_state( @@ -3079,7 +3080,8 @@ static void restore_planes_and_stream_state( *status->plane_states[i]->blend_tf = scratch->blend_tf[i]; } *stream = scratch->stream_state; - *stream->out_transfer_func = scratch->out_transfer_func; + if (stream->out_transfer_func) + *stream->out_transfer_func = scratch->out_transfer_func; } /** -- cgit v1.2.3-70-g09d2 From 57b1ce8384c71d59c48081a9cca91d22f1984a2d Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 6 Mar 2024 15:04:58 -0500 Subject: drm/amd/display: fix nonseamless transition from ODM + MPO to ODM + subvp [why] When ODM + MPO is used for all 4 available pipes, the pipe transition will be nonseamless. Phantom OTG master pipe reuses the secondary OPP head pipe.
There is no possible seamless path to transit to the new state. The correct logic would be to reuse a secondary DPP pipe as the phantom OTG master pipe. This way we are able to first transit the minimal transtion state of new and then transit to new state seamlessly. current New (nonseamless) ________________________ ________________________ | plane0 slice0 stream0| | plane0 slice0 stream0| |DPP0----OPP0----OTG0----| |DPP0----OPP0----OTG0----| | plane1 | | | | plane0 slice1 | | |DPP2----| | | |DPP2----OPP2----| | | plane0 slice1 | | | plane0 slice0 stream1| |DPP1----OPP1----| | |DPP1----OPP1----OTG1----| | plane1 | | | plane0 slice1 | | |DPP3----| | |DPP3----OPP3----| | |________________________| |________________________| New (seamless) New (minimal transition) ________________________ ________________________ | plane0 slice0 stream0| | plane0 slice0 stream0| |DPP0----OPP0----OTG0----| |DPP0----OPP0----OTG0----| | plane0 slice1 | | | plane0 slice1 | | |DPP1----OPP1----| | |DPP1----OPP1----| | | plane0 slice0 stream1| |________________________| |DPP2----OPP2----OTG2----| | plane0 slice1 | | |DPP3----OPP3----| | |________________________| [how] Try to acquire free pipes used as secondary DPP pipes from current state before try to acquire any free pipes for new OTG master pipe. Reviewed-by: Alvin Lee Acked-by: Tom Chung Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 44 +++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/inc/resource.h | 11 ++++++ 2 files changed, 55 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ce56e47cfd0a..fa6e6184c437 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1795,6 +1795,30 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( return free_pipe_idx; } +int resource_find_free_pipe_used_as_cur_sec_dpp( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool) +{ + int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND; + const struct pipe_ctx *new_pipe, *cur_pipe; + int i; + + for (i = 0; i < pool->pipe_count; i++) { + cur_pipe = &cur_res_ctx->pipe_ctx[i]; + new_pipe = &new_res_ctx->pipe_ctx[i]; + + if (resource_is_pipe_type(cur_pipe, DPP_PIPE) && + !resource_is_pipe_type(cur_pipe, OPP_HEAD) && + resource_is_pipe_type(new_pipe, FREE_PIPE)) { + free_pipe_idx = i; + break; + } + } + + return free_pipe_idx; +} + int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, @@ -3372,11 +3396,31 @@ static bool acquire_otg_master_pipe_for_stream( * any free pipes already used in current context as this could tear * down exiting ODM/MPC/MPO configuration unnecessarily. */ + + /* + * Try to acquire the same OTG master already in use. This is not + * optimal because resetting an enabled OTG master pipe for a new stream + * requires an extra frame of wait. However there are test automation + * and eDP assumptions that rely on reusing the same OTG master pipe + * during mode change. We have to keep this logic as is for now. + */ pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + /* + * Try to acquire a pipe not used in current resource context to avoid + * pipe swapping. 
+ */ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); + /* + * If pipe swapping is unavoidable, try to acquire pipe used as + * secondary DPP pipe in current state as we prioritize to support more + * streams over supporting MPO planes. + */ + if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) + pipe_idx = resource_find_free_pipe_used_as_cur_sec_dpp( + &cur_ctx->res_ctx, &new_ctx->res_ctx, pool); if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND) pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool); if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) { diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index d17fa73d72a3..7cb0ed612716 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -508,6 +508,17 @@ int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx( struct resource_context *new_res_ctx, const struct resource_pool *pool); +/* + * Look for a free pipe in new resource context that is used as a secondary DPP + * pipe in current resource context. + * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise + * pipe idx of the free pipe + */ +int resource_find_free_pipe_used_as_cur_sec_dpp( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool); + /* * Look for a free pipe in new resource context that is used as a secondary DPP * pipe in any MPCC combine in current resource context. -- cgit v1.2.3-70-g09d2 From 506d32ee9f9b65c9680ff15ceb8c4c1e91fe71d8 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 12 Mar 2024 14:21:07 -0400 Subject: drm/amd/display: build scaling params when a new plane is appended [why & how] We are bundling changes in plane state and the building of scaling params together. This is to simplify DML code so DML doesn't need to build scaling params. We are also avoiding rebuilding scaling params for planes without scaling changes.
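As a rough, standalone illustration of that gating (the struct and function names below are invented for the example and are not DC interfaces; only the clip-size/position check mirrors the patch):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the real DC surface update flags. */
struct example_update_flags {
	unsigned int clip_size_change : 1;
	unsigned int position_change : 1;
};

struct example_surface {
	const char *name;
	struct example_update_flags flags;
};

/* Rebuild scaling params only when the surface was actually moved or re-clipped. */
static bool needs_scaling_rebuild(const struct example_surface *surface)
{
	return surface->flags.clip_size_change || surface->flags.position_change;
}

int main(void)
{
	struct example_surface surfaces[] = {
		{ "plane that moved",            { .position_change = 1 } },
		{ "plane with unchanged scaling", { 0 } },
	};

	for (unsigned int i = 0; i < sizeof(surfaces) / sizeof(surfaces[0]); i++)
		printf("%s -> rebuild scaling params: %s\n", surfaces[i].name,
		       needs_scaling_rebuild(&surfaces[i]) ? "yes" : "no");
	return 0;
}

In the actual change the equivalent check keys off surface->update_flags.bits.clip_size_change and position_change, as seen in the diff that follows.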
Reviewed-by: Dillon Varone Acked-by: Tom Chung Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 5 ++++- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 10 ++++++++-- 2 files changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 8d042651b6fe..af257b58ac45 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3236,7 +3236,10 @@ static bool update_planes_and_stream_state(struct dc *dc, for (i = 0; i < surface_count; i++) { struct dc_plane_state *surface = srf_updates[i].surface; - if (update_type >= UPDATE_TYPE_MED) { + if (update_type != UPDATE_TYPE_MED) + continue; + if (surface->update_flags.bits.clip_size_change || + surface->update_flags.bits.position_change) { for (j = 0; j < dc->res_pool->pipe_count; j++) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j]; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index fa6e6184c437..2b01b4a3861d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2649,13 +2649,19 @@ bool resource_append_dpp_pipes_for_plane_composition( struct pipe_ctx *otg_master_pipe, struct dc_plane_state *plane_state) { + bool success; if (otg_master_pipe->plane_state == NULL) - return add_plane_to_opp_head_pipes(otg_master_pipe, + success = add_plane_to_opp_head_pipes(otg_master_pipe, plane_state, new_ctx); else - return acquire_secondary_dpp_pipes_and_add_plane( + success = acquire_secondary_dpp_pipes_and_add_plane( otg_master_pipe, plane_state, new_ctx, cur_ctx, pool); + if (success) + /* when appending a plane mpc slice count changes from 0 to 1 */ + success = update_pipe_params_after_mpc_slice_count_change( + plane_state, new_ctx, pool); + return success; } void resource_remove_dpp_pipes_for_plane_composition( -- cgit v1.2.3-70-g09d2 From 9712b64d6f3f173aff10322fd22dc3678758b8c3 Mon Sep 17 00:00:00 2001 From: George Shen Date: Fri, 16 Feb 2024 19:37:03 -0500 Subject: drm/amd/display: Remove MPC rate control logic from DCN30 and above [Why] MPC flow rate control is not needed for DCN30 and above. Current logic that uses it can result in underflow for certain edge cases (such as DSC N422 + ODM combine + 422 left edge pixel). [How] Remove MPC flow rate control logic and programming for DCN30 and above. 
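For reference, the arithmetic done by the calc_mpc_flow_ctrl_cnt() helpers that this patch deletes can be restated as a small standalone sketch; the timing values in main() are invented purely to show how the count scales with ODM combine, and this is not a drop-in for the removed kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the relevant dc_crtc_timing fields. */
struct example_timing {
	int h_total;
	int h_addressable;
	int h_border_left;
	int h_border_right;
	bool two_pixels_per_container;
};

/* Same shape as the removed helper: the non-active horizontal width,
 * halved when two pixels share a container or ODM combine is in use,
 * and halved once more for 4:1 ODM combine. */
static int example_flow_ctrl_cnt(const struct example_timing *t, int opp_cnt)
{
	bool hblank_halved = t->two_pixels_per_container;
	int cnt = t->h_total - t->h_addressable - t->h_border_left - t->h_border_right;

	if (opp_cnt >= 2)
		hblank_halved = true;
	if (hblank_halved)
		cnt /= 2;
	if (opp_cnt == 4)	/* ODM combine 4:1 case */
		cnt /= 2;
	return cnt;
}

int main(void)
{
	/* Invented 4k-like horizontal timing, for illustration only. */
	struct example_timing t = { 4400, 3840, 0, 0, false };

	printf("1 OPP: %d, 2 OPPs: %d, 4 OPPs: %d\n",
	       example_flow_ctrl_cnt(&t, 1),
	       example_flow_ctrl_cnt(&t, 2),
	       example_flow_ctrl_cnt(&t, 4));
	return 0;
}
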
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Wenjing Liu Acked-by: Tom Chung Signed-off-by: George Shen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c | 54 +++++++++++++--------- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h | 14 +++--- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 5 +- .../drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c | 41 ---------------- .../drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 41 ---------------- .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 41 ---------------- 6 files changed, 41 insertions(+), 155 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index bf3386cd444d..5ebb57303130 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -44,6 +44,36 @@ #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0])) +void mpc3_mpc_init(struct mpc *mpc) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + int opp_id; + + mpc1_mpc_init(mpc); + + for (opp_id = 0; opp_id < MAX_OPP; opp_id++) { + if (REG(MUX[opp_id])) + /* disable mpc out rate and flow control */ + REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE, + 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); + } +} + +void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id) +{ + struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); + + mpc1_mpc_init_single_inst(mpc, mpcc_id); + + /* assuming mpc out mux is connected to opp with the same index at this + * point in time (e.g. transitioning from vbios to driver) + */ + if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id])) + /* disable mpc out rate and flow control */ + REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE, + 1, MPC_OUT_FLOW_CONTROL_COUNT, 0); +} + bool mpc3_is_dwb_idle( struct mpc *mpc, int dwb_id) @@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux( MPC_DWB0_MUX, 0xf); } -void mpc3_set_out_rate_control( - struct mpc *mpc, - int opp_id, - bool enable, - bool rate_2x_mode, - struct mpc_dwb_flow_control *flow_control) -{ - struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); - - REG_UPDATE_2(MUX[opp_id], - MPC_OUT_RATE_CONTROL_DISABLE, !enable, - MPC_OUT_RATE_CONTROL, rate_2x_mode); - - if (flow_control) - REG_UPDATE_2(MUX[opp_id], - MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode, - MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1); -} - enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id) { /*Contrary to DCN2 and DCN1 wherein a single status register field holds this info; @@ -1490,8 +1501,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = { .read_mpcc_state = mpc3_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, - .mpc_init = mpc1_mpc_init, - .mpc_init_single_inst = mpc1_mpc_init_single_inst, + .mpc_init = mpc3_mpc_init, + .mpc_init_single_inst = mpc3_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, @@ -1508,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = { .set_dwb_mux = mpc3_set_dwb_mux, .disable_dwb_mux = mpc3_disable_dwb_mux, .is_dwb_idle = mpc3_is_dwb_idle, - .set_out_rate_control = mpc3_set_out_rate_control, .set_gamut_remap = mpc3_set_gamut_remap, .program_shaper = mpc3_program_shaper, .acquire_rmu = mpcc3_acquire_rmu, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h 
index 9cb96ae95a2f..ce93003dae01 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h @@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30, int num_mpcc, int num_rmu); +void mpc3_mpc_init( + struct mpc *mpc); + +void mpc3_mpc_init_single_inst( + struct mpc *mpc, + unsigned int mpcc_id); + bool mpc3_program_shaper( struct mpc *mpc, const struct pwl_params *params, @@ -1078,13 +1085,6 @@ bool mpc3_is_dwb_idle( struct mpc *mpc, int dwb_id); -void mpc3_set_out_rate_control( - struct mpc *mpc, - int opp_id, - bool enable, - bool rate_2x_mode, - struct mpc_dwb_flow_control *flow_control); - void mpc3_power_on_ogam_lut( struct mpc *mpc, int mpcc_id, bool power_on); diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c index e789e654c387..e408e859b355 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c @@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc) struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc); int mpcc_id; - mpc1_mpc_init(mpc); + mpc3_mpc_init(mpc); if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) { if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) { @@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, .mpc_init = mpc32_mpc_init, - .mpc_init_single_inst = mpc1_mpc_init_single_inst, + .mpc_init_single_inst = mpc3_mpc_init_single_inst, .update_blending = mpc2_update_blending, .cursor_lock = mpc1_cursor_lock, .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp, @@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .set_dwb_mux = mpc3_set_dwb_mux, .disable_dwb_mux = mpc3_disable_dwb_mux, .is_dwb_idle = mpc3_is_dwb_idle, - .set_out_rate_control = mpc3_set_out_rate_control, .set_gamut_remap = mpc3_set_gamut_remap, .program_shaper = mpc32_program_shaper, .program_3dlut = mpc32_program_3dlut, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c index 3a9cc8ac0c07..093f4387553c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c @@ -69,29 +69,6 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, - int opp_cnt) -{ - bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); - int flow_ctrl_cnt; - - if (opp_cnt >= 2) - hblank_halved = true; - - flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - - stream->timing.h_border_left - - stream->timing.h_border_right; - - if (hblank_halved) - flow_ctrl_cnt /= 2; - - /* ODM combine 4:1 case */ - if (opp_cnt == 4) - flow_ctrl_cnt /= 2; - - return flow_ctrl_cnt; -} - static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; @@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; - bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); - struct mpc_dwb_flow_control flow_control; - struct mpc *mpc = dc->res_pool->mpc; - int i; 
opp_cnt = get_odm_config(pipe_ctx, opp_inst); @@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; - flow_control.flow_ctrl_mode = 0; - flow_control.flow_ctrl_cnt0 = 0x80; - flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); - if (mpc->funcs->set_out_rate_control) { - for (i = 0; i < opp_cnt; ++i) { - mpc->funcs->set_out_rate_control( - mpc, opp_inst[i], - true, - rate_control_2x_pclk, - &flow_control); - } - } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( odm_pipe->stream_res.opp, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 367dcaeaf186..62ff99463834 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -970,29 +970,6 @@ void dcn32_init_hw(struct dc *dc) } } -static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, - int opp_cnt) -{ - bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing); - int flow_ctrl_cnt; - - if (opp_cnt >= 2) - hblank_halved = true; - - flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - - stream->timing.h_border_left - - stream->timing.h_border_right; - - if (hblank_halved) - flow_ctrl_cnt /= 2; - - /* ODM combine 4:1 case */ - if (opp_cnt == 4) - flow_ctrl_cnt /= 2; - - return flow_ctrl_cnt; -} - static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; @@ -1107,10 +1084,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; - bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); - struct mpc_dwb_flow_control flow_control; - struct mpc *mpc = dc->res_pool->mpc; - int i; opp_cnt = get_odm_config(pipe_ctx, opp_inst); @@ -1123,20 +1096,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; - flow_control.flow_ctrl_mode = 0; - flow_control.flow_ctrl_cnt0 = 0x80; - flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); - if (mpc->funcs->set_out_rate_control) { - for (i = 0; i < opp_cnt; ++i) { - mpc->funcs->set_out_rate_control( - mpc, opp_inst[i], - true, - rate_control_2x_pclk, - &flow_control); - } - } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( odm_pipe->stream_res.opp, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 66b162fe4700..629a702bb3fa 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -358,29 +358,6 @@ void dcn35_init_hw(struct dc *dc) } } -static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream, - int opp_cnt) -{ - bool hblank_halved = 
optc2_is_two_pixels_per_containter(&stream->timing); - int flow_ctrl_cnt; - - if (opp_cnt >= 2) - hblank_halved = true; - - flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable - - stream->timing.h_border_left - - stream->timing.h_border_right; - - if (hblank_halved) - flow_ctrl_cnt /= 2; - - /* ODM combine 4:1 case */ - if (opp_cnt == 4) - flow_ctrl_cnt /= 2; - - return flow_ctrl_cnt; -} - static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) { struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; @@ -474,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * struct pipe_ctx *odm_pipe; int opp_cnt = 0; int opp_inst[MAX_PIPES] = {0}; - bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing)); - struct mpc_dwb_flow_control flow_control; - struct mpc *mpc = dc->res_pool->mpc; - int i; opp_cnt = get_odm_config(pipe_ctx, opp_inst); @@ -490,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx * pipe_ctx->stream_res.tg->funcs->set_odm_bypass( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1; - flow_control.flow_ctrl_mode = 0; - flow_control.flow_ctrl_cnt0 = 0x80; - flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt); - if (mpc->funcs->set_out_rate_control) { - for (i = 0; i < opp_cnt; ++i) { - mpc->funcs->set_out_rate_control( - mpc, opp_inst[i], - true, - rate_control_2x_pclk, - &flow_control); - } - } - for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control( odm_pipe->stream_res.opp, -- cgit v1.2.3-70-g09d2 From a13ad81951c1334a2ddd0225929552f2eb7f074c Mon Sep 17 00:00:00 2001 From: Roman Li Date: Wed, 13 Mar 2024 18:35:13 -0400 Subject: drm/amd/display: Fix bounds check for dcn35 DcfClocks [Why] NumFclkLevelsEnabled is used for DcfClocks bounds check instead of designated NumDcfClkLevelsEnabled. That can cause array index out-of-bounds access. [How] Use designated variable for dcn35 DcfClocks bounds check. Fixes: a8edc9cc0b14 ("drm/amd/display: Fix array-index-out-of-bounds in dcn35_clkmgr") Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Sun peng Li Acked-by: Tom Chung Signed-off-by: Roman Li Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 928d61f9f858..8efde1cfb49a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -715,7 +715,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk clock_table->NumFclkLevelsEnabled; max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk); - num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS : + num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? 
NUM_DCFCLK_DPM_LEVELS : clock_table->NumDcfClkLevelsEnabled; for (i = 0; i < num_dcfclk; i++) { int j; -- cgit v1.2.3-70-g09d2 From 54935663d3ace0129a6805386fcef3ddd5477934 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Fri, 15 Mar 2024 09:20:42 -0400 Subject: drm/amd/display: Add new IPS config mode [Why] We don't have a way to specify IPS2 for display off but RCG only for static screen and local video playback. [How] Add a new setting that allows RCG only when displays are active but IPS2 when all displays are off. Reviewed-by: Ovidiu Bunea Acked-by: Tom Chung Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 29 +++++++++++++++++++++++++ drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 1 + 2 files changed, 30 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 12c142cae78b..9ae0e602e737 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1201,6 +1201,20 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait) return true; } +static int count_active_streams(const struct dc *dc) +{ + int i, count = 0; + + for (i = 0; i < dc->current_state->stream_count; ++i) { + struct dc_stream_state *stream = dc->current_state->streams[i]; + + if (stream && !stream->dpms_off) + count += 1; + } + + return count; +} + static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) { volatile const struct dmub_shared_state_ips_fw *ips_fw; @@ -1255,6 +1269,21 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; + } else if (dc->config.disable_ips == DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF) { + /* TODO: Move this logic out to hwseq */ + if (count_active_streams(dc) == 0) { + /* IPS2 - Display off */ + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } else { + /* RCG only */ + new_signals.bits.allow_pg = 0; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 0; + new_signals.bits.allow_z10 = 0; + } } ips_driver->signals = new_signals; diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 34cb25c6166a..995a54459335 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -624,6 +624,7 @@ enum dmub_ips_disable_type { DMUB_IPS_DISABLE_IPS2 = 3, DMUB_IPS_DISABLE_IPS2_Z10 = 4, DMUB_IPS_DISABLE_DYNAMIC = 5, + DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF = 6, }; #define DMUB_IPS1_ALLOW_MASK 0x00000001 -- cgit v1.2.3-70-g09d2 From e9a09a198bfe276f303b455392739086a948a6f4 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 13 Mar 2024 17:55:42 -0400 Subject: drm/amd/display: Allow Z8 when stutter threshold is not met [Why&How] Some panels don't meet the stutter threshold (4k etc), this leads to power regressions. 
Allow z8 for panels that don't meet the threshold but support PSR/replay Reviewed-by: Nicholas Kazlauskas Acked-by: Tom Chung Signed-off-by: Bhawanpreet Lakha Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 9 +++++++-- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 10 +++++++++- 2 files changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 38ab9ad60ef8..9c93a9f1eb0c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1085,6 +1085,9 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000; bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency; bool is_pwrseq0 = link->link_index == 0; + bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 || + link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr); + bool is_replay = link && link->replay_settings.replay_feature_enabled; /* Don't support multi-plane configurations */ if (stream_status->plane_count > 1) @@ -1092,13 +1095,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0) return DCN_ZSTATE_SUPPORT_ALLOW; - else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr) - return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; + else if (is_pwrseq0 && (is_psr || is_replay)) + return DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY; else return allow_z8 ? 
DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW; } else { return DCN_ZSTATE_SUPPORT_DISALLOW; } + + return DCN_ZSTATE_SUPPORT_DISALLOW; } static void dcn20_adjust_freesync_v_startup( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index deb6d162a2d5..59a902313200 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -485,6 +485,7 @@ void dcn31_calculate_wm_and_dlg_fp( { int i, pipe_idx, total_det = 0, active_hubp_count = 0; double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + uint32_t cstate_enter_plus_exit_z8_ns; dc_assert_fp_enabled(); @@ -504,6 +505,13 @@ void dcn31_calculate_wm_and_dlg_fp( pipes[0].clks_cfg.dcfclk_mhz = dcfclk; pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + cstate_enter_plus_exit_z8_ns = + get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + if (get_stutter_period(&context->bw_ctx.dml, pipes, pipe_cnt) < dc->debug.minimum_z8_residency_time && + cstate_enter_plus_exit_z8_ns < dc->debug.minimum_z8_residency_time * 1000) + cstate_enter_plus_exit_z8_ns = dc->debug.minimum_z8_residency_time * 1000; + /* Set A: * All clocks min required * @@ -514,7 +522,7 @@ void dcn31_calculate_wm_and_dlg_fp( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = get_wm_z8_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_z8_ns = get_wm_z8_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; -- cgit v1.2.3-70-g09d2 From 1576978f05d1ee61c87c2f3e9e3086686ff29531 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Thu, 14 Mar 2024 13:09:31 -0400 Subject: drm/amd/display: Allow Z8 when stutter threshold is not met for dcn35 [Why&How] Some panels don't meet the stutter threshold (4k etc), this leads to power regressions. 
Allow z8 for panels that don't meet the threshold but support PSR/replay Reviewed-by: Nicholas Kazlauskas Acked-by: Tom Chung Signed-off-by: Bhawanpreet Lakha Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 2 +- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 33ea89f20449..714c2fe03c5f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -603,7 +603,7 @@ void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context) if (is_pwrseq0 && allow_z10) support = DCN_ZSTATE_SUPPORT_ALLOW; else if (is_pwrseq0 && (is_psr || is_replay)) - support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; + support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY; else if (allow_z8) support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 72cca367062e..e2489eaabb20 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -570,6 +570,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s struct dml2_dcn_clocks out_clks; unsigned int result = 0; bool need_recalculation = false; + uint32_t cstate_enter_plus_exit_z8_ns; if (!context || context->stream_count == 0) return true; @@ -641,6 +642,14 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx); //copy for deciding zstate use context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod; + + cstate_enter_plus_exit_z8_ns = context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns; + + if (context->bw_ctx.dml.vba.StutterPeriod < in_dc->debug.minimum_z8_residency_time && + cstate_enter_plus_exit_z8_ns < in_dc->debug.minimum_z8_residency_time * 1000) + cstate_enter_plus_exit_z8_ns = in_dc->debug.minimum_z8_residency_time * 1000; + + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_z8_ns = cstate_enter_plus_exit_z8_ns; } return result; -- cgit v1.2.3-70-g09d2 From 7b4c74cf22d7584d1eb4a959ad807d6ddf34a126 Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Fri, 15 Mar 2024 14:48:12 -0400 Subject: drm/amd/display: Increase clock table size [why&how] To prevent out of bounds error, we need to increase the clock table size. 
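The out-of-bounds risk here is the usual fixed-size-buffer mismatch: the DCN3.51 path works with 8 DCFCLK STA levels while the default path only reserves 5, so the parameter block must always point at a buffer sized for the project in use. A minimal standalone sketch of that selection follows; apart from the two NUM_DCFCLK_STAS constants and the project-based choice, which mirror the patch, everything else is invented for illustration:

#include <stdio.h>

#define NUM_DCFCLK_STAS      5
#define NUM_DCFCLK_STAS_NEW  8

enum example_dml_project { EXAMPLE_DCN35, EXAMPLE_DCN351 };

struct example_params {
	unsigned int *dcfclk_stas_mhz;
	unsigned int num_dcfclk_stas;
};

int main(void)
{
	unsigned int stas[NUM_DCFCLK_STAS];
	unsigned int stas_new[NUM_DCFCLK_STAS_NEW];
	struct example_params p;
	enum example_dml_project project = EXAMPLE_DCN351;

	/* Pick the buffer that matches how many entries the consumer may
	 * write; handing the 5-entry array to a path that fills 8 levels
	 * would overrun it. */
	if (project == EXAMPLE_DCN351) {
		p.dcfclk_stas_mhz = stas_new;
		p.num_dcfclk_stas = NUM_DCFCLK_STAS_NEW;
	} else {
		p.dcfclk_stas_mhz = stas;
		p.num_dcfclk_stas = NUM_DCFCLK_STAS;
	}

	for (unsigned int i = 0; i < p.num_dcfclk_stas; i++)
		p.dcfclk_stas_mhz[i] = 400 + 100 * i;	/* placeholder values */

	printf("using %u DCFCLK STA levels\n", p.num_dcfclk_stas);
	return 0;
}
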
Reviewed-by: Xi Liu Acked-by: Tom Chung Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 0a4dff45731f..cf98411d0799 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -29,10 +29,7 @@ #include "dml2_translation_helper.h" #define NUM_DCFCLK_STAS 5 - -#if defined(CONFIG_DRM_AMD_DC_DCN3_51) #define NUM_DCFCLK_STAS_NEW 8 -#endif void dml2_init_ip_params(struct dml2_context *dml2, const struct dc *in_dc, struct ip_params_st *out) { @@ -258,21 +255,20 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc, struct dml2_policy_build_synthetic_soc_states_scratch *s = &dml2->v20.scratch.create_scratch.build_synthetic_socbb_scratch; struct dml2_policy_build_synthetic_soc_states_params *p = &dml2->v20.scratch.build_synthetic_socbb_params; unsigned int dcfclk_stas_mhz[NUM_DCFCLK_STAS]; -#if defined(CONFIG_DRM_AMD_DC_DCN3_51) unsigned int dcfclk_stas_mhz_new[NUM_DCFCLK_STAS_NEW]; unsigned int dml_project = dml2->v20.dml_core_ctx.project; -#endif + unsigned int i = 0; unsigned int transactions_per_mem_clock = 16; // project specific, depends on used Memory type - p->dcfclk_stas_mhz = dcfclk_stas_mhz; - p->num_dcfclk_stas = NUM_DCFCLK_STAS; -#if defined(CONFIG_DRM_AMD_DC_DCN3_51) if (dml_project == dml_project_dcn351) { p->dcfclk_stas_mhz = dcfclk_stas_mhz_new; p->num_dcfclk_stas = NUM_DCFCLK_STAS_NEW; + } else { + p->dcfclk_stas_mhz = dcfclk_stas_mhz; + p->num_dcfclk_stas = NUM_DCFCLK_STAS; } -#endif + p->in_bbox = in_bbox; p->out_states = out; p->in_states = &dml2->v20.scratch.create_scratch.in_states; -- cgit v1.2.3-70-g09d2 From a9d51813a3332d99d271e500b0c5ab1266b821c0 Mon Sep 17 00:00:00 2001 From: Sridevi Arvindekar Date: Fri, 15 Mar 2024 16:45:49 -0400 Subject: drm/amd/display: Increase number of hpo dp link encoders [Why] Number of hpo dp2 link encoders is increased. Instances are changed. [How] Increased size in resource pool, init for each instance Reviewed-by: Alvin Lee Acked-by: Tom Chung Signed-off-by: Sridevi Arvindekar Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index dcae23faeee3..c1835ad6550f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -47,7 +47,7 @@ #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 -#define MAX_HPO_DP2_LINK_ENCODERS 2 +#define MAX_HPO_DP2_LINK_ENCODERS 4 struct gamma_curve { uint32_t offset; -- cgit v1.2.3-70-g09d2 From 285a7054bf81ffebcd00807017e1d2eb756ad892 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Fri, 15 Mar 2024 17:54:20 -0400 Subject: drm/amd/display: Remove plane and stream pointers from dc scratch [Why&How] Remove several plane and stream pointers from dc for code refactoring. 
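The shape of the refactor is visible throughout the diff below: members such as in_transfer_func change from separately allocated pointers to structs embedded directly in the plane/stream state, and call sites switch from passing the stored pointer to taking the member's address. A minimal standalone sketch of that pattern (the types here are invented; only the pointer-to-embedded-member change mirrors the patch):

#include <stdio.h>

struct example_transfer_func {
	int type;	/* stand-in payload */
};

/* Before: the plane kept a pointer that had to be allocated and freed
 * separately. After: the struct is embedded in the plane state itself. */
struct example_plane_state {
	struct example_transfer_func in_transfer_func;	/* was: struct example_transfer_func * */
};

static void consume_tf(const struct example_transfer_func *tf)
{
	printf("tf type = %d\n", tf->type);
}

int main(void)
{
	struct example_plane_state plane = { .in_transfer_func = { .type = 1 } };

	/* Call sites now take the address of the embedded member
	 * (e.g. &dc_plane->in_transfer_func in the real patch) instead of
	 * passing a stored pointer. */
	consume_tf(&plane.in_transfer_func);
	return 0;
}
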
Reviewed-by: Wenjing Liu Acked-by: Tom Chung Signed-off-by: Alvin Lee Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 18 ++--- .../drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 42 ++++++------ drivers/gpu/drm/amd/display/dc/core/dc.c | 80 ++++++++-------------- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 8 +-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 16 +---- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 47 ++----------- drivers/gpu/drm/amd/display/dc/dc.h | 26 +++---- drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 +- drivers/gpu/drm/amd/display/dc/dc_types.h | 2 +- .../gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h | 2 +- .../drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c | 10 +-- .../gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 10 +-- .../drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 15 ++-- .../drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 24 +++---- .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 63 ++++++++--------- .../drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 66 +++++++++--------- .../drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h | 5 +- .../drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 73 +++++++++----------- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 5 +- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 2 +- 21 files changed, 218 insertions(+), 300 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 71d2d44681b2..39d0fcc10aea 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5700,8 +5700,8 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->aspect_ratio = get_aspect_ratio(mode_in); - stream->out_transfer_func->type = TF_TYPE_PREDEFINED; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + stream->out_transfer_func.type = TF_TYPE_PREDEFINED; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { if (!adjust_colour_depth_from_display_info(timing_out, info) && drm_mode_is_420_also(info, mode_in) && @@ -6319,7 +6319,7 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) stream->use_vsc_sdp_for_colorimetry = true; } - if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) + if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; @@ -8392,13 +8392,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { - bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; - bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction; + bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func; bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult; - bundle->surface_updates[planes_count].func_shaper = dc_plane->in_shaper_func; - 
bundle->surface_updates[planes_count].lut3d_func = dc_plane->lut3d_func; - bundle->surface_updates[planes_count].blend_tf = dc_plane->blend_tf; + bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func; + bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func; + bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf; } amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, @@ -8611,7 +8611,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->stream_update.output_csc_transform = &acrtc_state->stream->csc_color_matrix; bundle->stream_update.out_transfer_func = - acrtc_state->stream->out_transfer_func; + &acrtc_state->stream->out_transfer_func; bundle->stream_update.lut3d_func = (struct dc_3dlut *) acrtc_state->stream->lut3d_func; bundle->stream_update.func_shaper = diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index c87b64e464ed..ebabfe3a512f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -571,7 +571,7 @@ static int amdgpu_dm_set_atomic_regamma(struct dc_stream_state *stream, uint32_t regamma_size, bool has_rom, enum dc_transfer_func_predefined tf) { - struct dc_transfer_func *out_tf = stream->out_transfer_func; + struct dc_transfer_func *out_tf = &stream->out_transfer_func; int ret = 0; if (regamma_size || tf != TRANSFER_FUNCTION_LINEAR) { @@ -954,8 +954,8 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc) * inverse color ramp in legacy userspace. */ crtc->cm_is_degamma_srgb = true; - stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + stream->out_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; + stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB; /* * Note: although we pass has_rom as parameter here, we never * actually use ROM because the color module only takes the ROM * * See more in mod_color_calculate_regamma_params() */ - r = __set_legacy_tf(stream->out_transfer_func, regamma_lut, + r = __set_legacy_tf(&stream->out_transfer_func, regamma_lut, regamma_size, has_rom); if (r) return r; @@ -1034,7 +1034,7 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, &degamma_size); ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); - dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; + dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; /* * This case isn't fully correct, but also fairly @@ -1061,12 +1061,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, * map these to the atomic one instead. */ if (crtc->cm_is_degamma_srgb) - dc_plane_state->in_transfer_func->tf = tf; else - dc_plane_state->in_transfer_func->tf = + dc_plane_state->in_transfer_func.tf = tf; + else + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; - r = __set_input_tf(caps, dc_plane_state->in_transfer_func, + r = __set_input_tf(caps, &dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (r) return r; @@ -1075,12 +1075,12 @@ map_crtc_degamma_to_dc_plane(struct dm_crtc_state *crtc, * For legacy gamma support we need the regamma input * in linear space. Assume that the input is sRGB.
*/ - dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED; - dc_plane_state->in_transfer_func->tf = tf; + dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED; + dc_plane_state->in_transfer_func.tf = tf; if (tf != TRANSFER_FUNCTION_SRGB && !mod_color_calculate_degamma_params(caps, - dc_plane_state->in_transfer_func, + &dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; } @@ -1114,24 +1114,24 @@ __set_dm_plane_degamma(struct drm_plane_state *plane_state, if (!has_degamma_lut && tf == AMDGPU_TRANSFER_FUNCTION_DEFAULT) return -EINVAL; - dc_plane_state->in_transfer_func->tf = amdgpu_tf_to_dc_tf(tf); + dc_plane_state->in_transfer_func.tf = amdgpu_tf_to_dc_tf(tf); if (has_degamma_lut) { ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES); - dc_plane_state->in_transfer_func->type = + dc_plane_state->in_transfer_func.type = TF_TYPE_DISTRIBUTED_POINTS; - ret = __set_input_tf(color_caps, dc_plane_state->in_transfer_func, + ret = __set_input_tf(color_caps, &dc_plane_state->in_transfer_func, degamma_lut, degamma_size); if (ret) return ret; } else { - dc_plane_state->in_transfer_func->type = + dc_plane_state->in_transfer_func.type = TF_TYPE_PREDEFINED; if (!mod_color_calculate_degamma_params(color_caps, - dc_plane_state->in_transfer_func, NULL, false)) + &dc_plane_state->in_transfer_func, NULL, false)) return -ENOMEM; } return 0; @@ -1156,11 +1156,11 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, lut3d = __extract_blob_lut(dm_plane_state->lut3d, &lut3d_size); lut3d_size = lut3d != NULL ? lut3d_size : 0; - amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, dc_plane_state->lut3d_func); + amdgpu_dm_atomic_lut3d(lut3d, lut3d_size, &dc_plane_state->lut3d_func); ret = amdgpu_dm_atomic_shaper_lut(shaper_lut, false, amdgpu_tf_to_dc_tf(shaper_tf), shaper_size, - dc_plane_state->in_shaper_func); + &dc_plane_state->in_shaper_func); if (ret) { drm_dbg_kms(plane_state->plane->dev, "setting plane %d shaper LUT failed.\n", @@ -1175,7 +1175,7 @@ amdgpu_dm_plane_set_color_properties(struct drm_plane_state *plane_state, ret = amdgpu_dm_atomic_blend_lut(blend_lut, false, amdgpu_tf_to_dc_tf(blend_tf), - blend_size, dc_plane_state->blend_tf); + blend_size, &dc_plane_state->blend_tf); if (ret) { drm_dbg_kms(plane_state->plane->dev, "setting plane %d gamma lut failed.\n", @@ -1221,8 +1221,8 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, color_caps = &dc_plane_state->ctx->dc->caps.color; /* Initially, we can just bypass the DGM block. 
*/ - dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; + dc_plane_state->in_transfer_func.type = TF_TYPE_BYPASS; + dc_plane_state->in_transfer_func.tf = TRANSFER_FUNCTION_LINEAR; /* After, we start to update values according to color props */ has_crtc_cm_degamma = (crtc->cm_has_degamma || crtc->cm_is_degamma_srgb); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index af257b58ac45..667655d0e5b9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2851,55 +2851,45 @@ static void copy_surface_update_to_plane( srf_update->plane_info->layer_index; } - if (srf_update->gamma && - (surface->gamma_correction != - srf_update->gamma)) { - memcpy(&surface->gamma_correction->entries, + if (srf_update->gamma) { + memcpy(&surface->gamma_correction.entries, &srf_update->gamma->entries, sizeof(struct dc_gamma_entries)); - surface->gamma_correction->is_identity = + surface->gamma_correction.is_identity = srf_update->gamma->is_identity; - surface->gamma_correction->num_entries = + surface->gamma_correction.num_entries = srf_update->gamma->num_entries; - surface->gamma_correction->type = + surface->gamma_correction.type = srf_update->gamma->type; } - if (srf_update->in_transfer_func && - (surface->in_transfer_func != - srf_update->in_transfer_func)) { - surface->in_transfer_func->sdr_ref_white_level = + if (srf_update->in_transfer_func) { + surface->in_transfer_func.sdr_ref_white_level = srf_update->in_transfer_func->sdr_ref_white_level; - surface->in_transfer_func->tf = + surface->in_transfer_func.tf = srf_update->in_transfer_func->tf; - surface->in_transfer_func->type = + surface->in_transfer_func.type = srf_update->in_transfer_func->type; - memcpy(&surface->in_transfer_func->tf_pts, + memcpy(&surface->in_transfer_func.tf_pts, &srf_update->in_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } - if (srf_update->func_shaper && - (surface->in_shaper_func != - srf_update->func_shaper)) - memcpy(surface->in_shaper_func, srf_update->func_shaper, - sizeof(*surface->in_shaper_func)); + if (srf_update->func_shaper) + memcpy(&surface->in_shaper_func, srf_update->func_shaper, + sizeof(surface->in_shaper_func)); - if (srf_update->lut3d_func && - (surface->lut3d_func != - srf_update->lut3d_func)) - memcpy(surface->lut3d_func, srf_update->lut3d_func, - sizeof(*surface->lut3d_func)); + if (srf_update->lut3d_func) + memcpy(&surface->lut3d_func, srf_update->lut3d_func, + sizeof(surface->lut3d_func)); if (srf_update->hdr_mult.value) surface->hdr_mult = srf_update->hdr_mult; - if (srf_update->blend_tf && - (surface->blend_tf != - srf_update->blend_tf)) - memcpy(surface->blend_tf, srf_update->blend_tf, - sizeof(*surface->blend_tf)); + if (srf_update->blend_tf) + memcpy(&surface->blend_tf, srf_update->blend_tf, + sizeof(surface->blend_tf)); if (srf_update->input_csc_color_matrix) surface->input_csc_color_matrix = @@ -2930,14 +2920,13 @@ static void copy_stream_update_to_stream(struct dc *dc, if (update->dst.height && update->dst.width) stream->dst = update->dst; - if (update->out_transfer_func && - stream->out_transfer_func != update->out_transfer_func) { - stream->out_transfer_func->sdr_ref_white_level = + if (update->out_transfer_func) { + stream->out_transfer_func.sdr_ref_white_level = update->out_transfer_func->sdr_ref_white_level; - stream->out_transfer_func->tf = update->out_transfer_func->tf; - 
stream->out_transfer_func->type = + stream->out_transfer_func.tf = update->out_transfer_func->tf; + stream->out_transfer_func.type = update->out_transfer_func->type; - memcpy(&stream->out_transfer_func->tf_pts, + memcpy(&stream->out_transfer_func.tf_pts, &update->out_transfer_func->tf_pts, sizeof(struct dc_transfer_func_distributed_points)); } @@ -3050,15 +3039,8 @@ static void backup_planes_and_stream_state( for (i = 0; i < status->plane_count; i++) { scratch->plane_states[i] = *status->plane_states[i]; - scratch->gamma_correction[i] = *status->plane_states[i]->gamma_correction; - scratch->in_transfer_func[i] = *status->plane_states[i]->in_transfer_func; - scratch->lut3d_func[i] = *status->plane_states[i]->lut3d_func; - scratch->in_shaper_func[i] = *status->plane_states[i]->in_shaper_func; - scratch->blend_tf[i] = *status->plane_states[i]->blend_tf; } scratch->stream_state = *stream; - if (stream->out_transfer_func) - scratch->out_transfer_func = *stream->out_transfer_func; } static void restore_planes_and_stream_state( @@ -3073,15 +3055,8 @@ static void restore_planes_and_stream_state( for (i = 0; i < status->plane_count; i++) { *status->plane_states[i] = scratch->plane_states[i]; - *status->plane_states[i]->gamma_correction = scratch->gamma_correction[i]; - *status->plane_states[i]->in_transfer_func = scratch->in_transfer_func[i]; - *status->plane_states[i]->lut3d_func = scratch->lut3d_func[i]; - *status->plane_states[i]->in_shaper_func = scratch->in_shaper_func[i]; - *status->plane_states[i]->blend_tf = scratch->blend_tf[i]; } *stream = scratch->stream_state; - if (stream->out_transfer_func) - *stream->out_transfer_func = scratch->out_transfer_func; } /** @@ -5354,10 +5329,13 @@ void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) } dc->clk_mgr->dc_mode_softmax_enabled = enable; } -bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, +bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr) { - if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr)) + if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr)) return true; return false; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 9c05b1a07142..3b1b324c0824 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -392,10 +392,10 @@ void get_hdr_visual_confirm_color( switch (top_pipe_ctx->plane_res.scl_data.format) { case PIXEL_FORMAT_ARGB2101010: - if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { + if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_PQ) { /* HDR10, ARGB2101010 - set border color to red */ color->color_r_cr = color_value; - } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 ARGB2101010 - set border color to pink */ color->color_r_cr = color_value; color->color_b_cb = color_value; @@ -403,10 +403,10 @@ void get_hdr_visual_confirm_color( is_sdr = true; break; case PIXEL_FORMAT_FP16: - if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) { + if (top_pipe_ctx->stream->out_transfer_func.tf == 
TRANSFER_FUNCTION_PQ) { /* HDR10, FP16 - set border color to blue */ color->color_b_cb = color_value; - } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + } else if (top_pipe_ctx->stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { /* FreeSync 2 HDR - set border color to green */ color->color_g_y = color_value; } else diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2b01b4a3861d..dd0428024173 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -4041,7 +4041,7 @@ static void set_avi_info_frame( } if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) { + stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) { hdmi_info.bits.EC0_EC2 = 0; hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 212e057ed9b0..ee6493a9a79c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -116,12 +116,7 @@ bool dc_stream_construct(struct dc_stream_state *stream, update_stream_signal(stream, dc_sink_data); - stream->out_transfer_func = dc_create_transfer_func(); - if (stream->out_transfer_func == NULL) { - dc_sink_release(dc_sink_data); - return false; - } - stream->out_transfer_func->type = TF_TYPE_BYPASS; + stream->out_transfer_func.type = TF_TYPE_BYPASS; dc_stream_assign_stream_id(stream); @@ -131,10 +126,6 @@ bool dc_stream_construct(struct dc_stream_state *stream, void dc_stream_destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); - if (stream->out_transfer_func != NULL) { - dc_transfer_func_release(stream->out_transfer_func); - stream->out_transfer_func = NULL; - } } void dc_stream_assign_stream_id(struct dc_stream_state *stream) @@ -201,9 +192,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream) if (new_stream->sink) dc_sink_retain(new_stream->sink); - if (new_stream->out_transfer_func) - dc_transfer_func_retain(new_stream->out_transfer_func); - dc_stream_assign_stream_id(new_stream); /* If using dynamic encoder assignment, wait till stream committed to assign encoder. 
*/ @@ -425,7 +413,7 @@ bool dc_stream_add_writeback(struct dc *dc, dc_exit_ips_for_hw_access(dc); - wb_info->dwb_params.out_transfer_func = stream->out_transfer_func; + wb_info->dwb_params.out_transfer_func = &stream->out_transfer_func; dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; dwb->dwb_is_drc = false; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index acc7a8baa169..ea624e000ec0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -41,25 +41,15 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta { plane_state->ctx = ctx; - plane_state->gamma_correction = dc_create_gamma(); - if (plane_state->gamma_correction != NULL) - plane_state->gamma_correction->is_identity = true; + plane_state->gamma_correction.is_identity = true; - plane_state->in_transfer_func = dc_create_transfer_func(); - if (plane_state->in_transfer_func != NULL) { - plane_state->in_transfer_func->type = TF_TYPE_BYPASS; - } - plane_state->in_shaper_func = dc_create_transfer_func(); - if (plane_state->in_shaper_func != NULL) { - plane_state->in_shaper_func->type = TF_TYPE_BYPASS; - } + plane_state->in_transfer_func.type = TF_TYPE_BYPASS; - plane_state->lut3d_func = dc_create_3dlut_func(); + plane_state->in_shaper_func.type = TF_TYPE_BYPASS; - plane_state->blend_tf = dc_create_transfer_func(); - if (plane_state->blend_tf != NULL) { - plane_state->blend_tf->type = TF_TYPE_BYPASS; - } + plane_state->lut3d_func.state.raw = 0; + + plane_state->blend_tf.type = TF_TYPE_BYPASS; plane_state->pre_multiplied_alpha = true; @@ -67,30 +57,7 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_sta void dc_plane_destruct(struct dc_plane_state *plane_state) { - if (plane_state->gamma_correction != NULL) { - dc_gamma_release(&plane_state->gamma_correction); - } - if (plane_state->in_transfer_func != NULL) { - dc_transfer_func_release( - plane_state->in_transfer_func); - plane_state->in_transfer_func = NULL; - } - if (plane_state->in_shaper_func != NULL) { - dc_transfer_func_release( - plane_state->in_shaper_func); - plane_state->in_shaper_func = NULL; - } - if (plane_state->lut3d_func != NULL) { - dc_3dlut_func_release( - plane_state->lut3d_func); - plane_state->lut3d_func = NULL; - } - if (plane_state->blend_tf != NULL) { - dc_transfer_func_release( - plane_state->blend_tf); - plane_state->blend_tf = NULL; - } - + // no more pointers to free within dc_plane_state } /******************************************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 52fd2ebdcdbb..dd54de5f3b2f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1212,6 +1212,8 @@ union surface_update_flags { uint32_t raw; }; +#define DC_REMOVE_PLANE_POINTERS 1 + struct dc_plane_state { struct dc_plane_address address; struct dc_plane_flip_time time; @@ -1226,8 +1228,8 @@ struct dc_plane_state { struct dc_plane_dcc_param dcc; - struct dc_gamma *gamma_correction; - struct dc_transfer_func *in_transfer_func; + struct dc_gamma gamma_correction; + struct dc_transfer_func in_transfer_func; struct dc_bias_and_scale *bias_and_scale; struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; @@ -1239,9 +1241,9 @@ struct dc_plane_state { enum dc_color_space color_space; - struct dc_3dlut *lut3d_func; - struct dc_transfer_func 
*in_shaper_func; - struct dc_transfer_func *blend_tf; + struct dc_3dlut lut3d_func; + struct dc_transfer_func in_shaper_func; + struct dc_transfer_func blend_tf; struct dc_transfer_func *gamcor_tf; enum surface_pixel_format format; @@ -1308,14 +1310,8 @@ struct dc_scratch_space { * a valid current state during dc update. */ struct dc_plane_state plane_states[MAX_SURFACE_NUM]; - struct dc_gamma gamma_correction[MAX_SURFACE_NUM]; - struct dc_transfer_func in_transfer_func[MAX_SURFACE_NUM]; - struct dc_3dlut lut3d_func[MAX_SURFACE_NUM]; - struct dc_transfer_func in_shaper_func[MAX_SURFACE_NUM]; - struct dc_transfer_func blend_tf[MAX_SURFACE_NUM]; struct dc_stream_state stream_state; - struct dc_transfer_func out_transfer_func; }; struct dc { @@ -1384,6 +1380,7 @@ struct dc { } update_bw_bounding_box; struct dc_scratch_space current_state; struct dc_scratch_space new_state; + struct dc_stream_state temp_stream; // Used so we don't need to allocate stream on the stack } scratch; struct dml2_configuration_options dml2_options; @@ -2364,8 +2361,11 @@ bool dc_is_dmcu_initialized(struct dc *dc); enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping); void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane, - struct dc_cursor_attributes *cursor_attr); +bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, + struct dc_cursor_attributes *cursor_attr); #define dc_allow_idle_optimizations(dc, allow) dc_allow_idle_optimizations_internal(dc, allow, __func__) #define dc_exit_ips_for_hw_access(dc) dc_exit_ips_for_hw_access_internal(dc, __func__) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index ee10941caa59..68dc668f3e14 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -190,7 +190,7 @@ struct dc_stream_state { PHYSICAL_ADDRESS_LOC dmdata_address; bool use_dynamic_meta; - struct dc_transfer_func *out_transfer_func; + struct dc_transfer_func out_transfer_func; struct colorspace_transform gamut_remap_matrix; struct dc_csc_transform csc_color_matrix; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index be2ac5c442a4..e219a1812360 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -422,7 +422,7 @@ struct dc_dwb_params { enum dwb_capture_rate capture_rate; /* controls the frame capture rate */ struct scaling_taps scaler_taps; /* Scaling taps */ enum dwb_subsample_position subsample_position; - struct dc_transfer_func *out_transfer_func; + const struct dc_transfer_func *out_transfer_func; }; /* audio*/ diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h index 672cde46c4b9..49cb25c9cb36 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h @@ -736,7 +736,7 @@ bool dpp20_program_shaper( bool dpp20_program_3dlut( struct dpp *dpp_base, - struct tetrahedral_params *params); + const struct tetrahedral_params *params); void dpp2_cnv_set_alpha_keyer( struct dpp *dpp_base, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c 
b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c index 131a8de8e0f8..f43fa29971f2 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c @@ -1114,15 +1114,15 @@ static void dpp20_select_3dlut_ram_mask( bool dpp20_program_3dlut( struct dpp *dpp_base, - struct tetrahedral_params *params) + const struct tetrahedral_params *params) { enum dc_lut_mode mode; bool is_17x17x17; bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; + const struct dc_rgb *lut0; + const struct dc_rgb *lut1; + const struct dc_rgb *lut2; + const struct dc_rgb *lut3; int lut_size0; int lut_size; diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 0c31b12cacaf..58095ac75506 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -1384,15 +1384,15 @@ static void dpp3_select_3dlut_ram_mask( } static bool dpp3_program_3dlut(struct dpp *dpp_base, - struct tetrahedral_params *params) + const struct tetrahedral_params *params) { enum dc_lut_mode mode; bool is_17x17x17; bool is_12bits_color_channel; - struct dc_rgb *lut0; - struct dc_rgb *lut1; - struct dc_rgb *lut2; - struct dc_rgb *lut3; + const struct dc_rgb *lut0; + const struct dc_rgb *lut1; + const struct dc_rgb *lut2; + const struct dc_rgb *lut3; int lut_size0; int lut_size; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index de5542c20103..35c631c22934 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -289,16 +289,14 @@ dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (ipp == NULL) return false; - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; + tf = &plane_state->in_transfer_func; build_prescale_params(&prescale_params, plane_state); ipp->funcs->ipp_program_prescale(ipp, &prescale_params); - if (plane_state->gamma_correction && - !plane_state->gamma_correction->is_identity && + if (!plane_state->gamma_correction.is_identity && dce_use_lut(plane_state->format)) - ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction); + ipp->funcs->ipp_program_input_lut(ipp, &plane_state->gamma_correction); if (tf == NULL) { /* Default case if no input transfer function specified */ @@ -614,11 +612,10 @@ dce110_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, xfm->funcs->opp_power_on_regamma_lut(xfm, true); xfm->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM; - if (stream->out_transfer_func && - stream->out_transfer_func->type == TF_TYPE_PREDEFINED && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) { + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED && + stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) { xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_SRGB); - } else if (dce110_translate_regamma_to_hw_format(stream->out_transfer_func, + } else if (dce110_translate_regamma_to_hw_format(&stream->out_transfer_func, &xfm->regamma_params)) { xfm->funcs->opp_program_regamma_pwl(xfm, &xfm->regamma_params); xfm->funcs->opp_set_regamma_mode(xfm, OPP_REGAMMA_USER); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index 
e0c3c14d12f3..3940f25f7d9f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -1829,14 +1829,12 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (dpp_base == NULL) return false; - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; + tf = &plane_state->in_transfer_func; - if (plane_state->gamma_correction && - !dpp_base->ctx->dc->debug.always_use_regamma - && !plane_state->gamma_correction->is_identity + if (!dpp_base->ctx->dc->debug.always_use_regamma + && !plane_state->gamma_correction.is_identity && dce_use_lut(plane_state->format)) - dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); + dpp_base->funcs->dpp_program_input_lut(dpp_base, &plane_state->gamma_correction); if (tf == NULL) dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); @@ -1877,7 +1875,7 @@ bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, #define MAX_NUM_HW_POINTS 0x200 static void log_tf(struct dc_context *ctx, - struct dc_transfer_func *tf, uint32_t hw_points_num) + const struct dc_transfer_func *tf, uint32_t hw_points_num) { // DC_LOG_GAMMA is default logging of all hw points // DC_LOG_ALL_GAMMA logs all points, not only hw points @@ -1914,16 +1912,15 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM; - if (stream->out_transfer_func && - stream->out_transfer_func->type == TF_TYPE_PREDEFINED && - stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED && + stream->out_transfer_func.tf == TRANSFER_FUNCTION_SRGB) dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB); /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full * update. 
*/ else if (cm_helper_translate_curve_to_hw_format(dc->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, &dpp->regamma_params, false)) { dpp->funcs->dpp_program_regamma_pwl( dpp, @@ -1931,10 +1928,9 @@ bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, } else dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS); - if (stream->ctx && - stream->out_transfer_func) { + if (stream->ctx) { log_tf(stream->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, dpp->regamma_params.hw_points_num); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index eae71f448143..1530b1f8f8be 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1011,7 +1011,7 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; /* * program OGAM only for the top pipe * if there is a pipe split then fix diagnostic is required: @@ -1022,19 +1022,19 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, if (mpc->funcs->power_on_mpc_mem_pwr) mpc->funcs->power_on_mpc_mem_pwr(mpc, mpcc_id, true); if (pipe_ctx->top_pipe == NULL - && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm_helper_translate_curve_to_hw_format(dc->ctx, - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* * there is no ROM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } /* @@ -1050,17 +1050,15 @@ bool dcn20_set_blend_lut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *blend_lut = NULL; - - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - blend_lut = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->blend_tf, - &dpp_base->regamma_params, false); - blend_lut = &dpp_base->regamma_params; - } + const struct pwl_params *blend_lut = NULL; + + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + blend_lut = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->blend_tf, + &dpp_base->regamma_params, false); + blend_lut = &dpp_base->regamma_params; } result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); @@ -1072,24 +1070,21 @@ bool dcn20_set_shaper_3dlut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *shaper_lut = NULL; - - if (plane_state->in_shaper_func) { - if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) - shaper_lut = &plane_state->in_shaper_func->pwl; - else if 
(plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm_helper_translate_curve_to_hw_format(plane_state->ctx, - plane_state->in_shaper_func, - &dpp_base->shaper_params, true); - shaper_lut = &dpp_base->shaper_params; - } + const struct pwl_params *shaper_lut = NULL; + + if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) + shaper_lut = &plane_state->in_shaper_func.pwl; + else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm_helper_translate_curve_to_hw_format(plane_state->ctx, + &plane_state->in_shaper_func, + &dpp_base->shaper_params, true); + shaper_lut = &dpp_base->shaper_params; } result = dpp_base->funcs->dpp_program_shaper_lut(dpp_base, shaper_lut); - if (plane_state->lut3d_func && - plane_state->lut3d_func->state.bits.initialized == 1) + if (plane_state->lut3d_func.state.bits.initialized == 1) result = dpp_base->funcs->dpp_program_3dlut(dpp_base, - &plane_state->lut3d_func->lut_3d); + &plane_state->lut3d_func.lut_3d); else result = dpp_base->funcs->dpp_program_3dlut(dpp_base, NULL); @@ -1112,9 +1107,7 @@ bool dcn20_set_input_transfer_func(struct dc *dc, hws->funcs.set_shaper_3dlut(pipe_ctx, plane_state); hws->funcs.set_blend_lut(pipe_ctx, plane_state); - if (plane_state->in_transfer_func) - tf = plane_state->in_transfer_func; - + tf = &plane_state->in_transfer_func; if (tf == NULL) { dpp_base->funcs->dpp_set_degamma(dpp_base, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 8bc3d01537bb..af3a26c2656b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -223,16 +223,14 @@ bool dcn30_set_blend_lut( { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; bool result = true; - struct pwl_params *blend_lut = NULL; - - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - blend_lut = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm3_helper_translate_curve_to_hw_format( - plane_state->blend_tf, &dpp_base->regamma_params, false); - blend_lut = &dpp_base->regamma_params; - } + const struct pwl_params *blend_lut = NULL; + + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + blend_lut = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm3_helper_translate_curve_to_hw_format( + &plane_state->blend_tf, &dpp_base->regamma_params, false); + blend_lut = &dpp_base->regamma_params; } result = dpp_base->funcs->dpp_program_blnd_lut(dpp_base, blend_lut); @@ -300,27 +298,24 @@ bool dcn30_set_input_transfer_func(struct dc *dc, struct dpp *dpp_base = pipe_ctx->plane_res.dpp; enum dc_transfer_func_predefined tf; bool result = true; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; if (dpp_base == NULL || plane_state == NULL) return false; tf = TRANSFER_FUNCTION_UNITY; - if (plane_state->in_transfer_func && - plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) - tf = plane_state->in_transfer_func->tf; + if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED) + tf = plane_state->in_transfer_func.tf; dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); - if (plane_state->in_transfer_func) { - if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) - params = &plane_state->in_transfer_func->pwl; - else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && - 
cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) - params = &dpp_base->degamma_params; - } + if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) + params = &plane_state->in_transfer_func.pwl; + else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && + cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) + params = &dpp_base->degamma_params; result = dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -378,24 +373,24 @@ bool dcn30_set_output_transfer_func(struct dc *dc, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ if (pipe_ctx->top_pipe == NULL) { /*program rmu shaper and 3dlut in MPC*/ ret = dcn30_set_mpc_shaper_3dlut(pipe_ctx, stream); - if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + if (ret == false && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format( - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } } @@ -946,7 +941,8 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB8888 && plane->address.page_table_base.quad_part == 0 && dc->hwss.does_plane_fit_in_mall && - dc->hwss.does_plane_fit_in_mall(dc, plane, + dc->hwss.does_plane_fit_in_mall(dc, plane->plane_size.surface_pitch, + plane->plane_size.surface_size.height, plane->format, cursor_cache_enable ? &cursor_attr : NULL)) { unsigned int v_total = stream->adjust.v_total_max ? stream->adjust.v_total_max : stream->timing.v_total; @@ -1076,11 +1072,15 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) return true; } -bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, struct dc_cursor_attributes *cursor_attr) +bool dcn30_does_plane_fit_in_mall(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, + struct dc_cursor_attributes *cursor_attr) { // add meta size? - unsigned int surface_size = plane->plane_size.surface_pitch * plane->plane_size.surface_size.height * - (plane->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4); + unsigned int surface_size = pitch * height * + (format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 
8 : 4); unsigned int mall_size = dc->caps.mall_size_total; unsigned int cursor_size = 0; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h index 638f018a3cb5..76b16839486a 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h @@ -71,7 +71,10 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable); void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx); void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx); -bool dcn30_does_plane_fit_in_mall(struct dc *dc, struct dc_plane_state *plane, +bool dcn30_does_plane_fit_in_mall(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr); bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 62ff99463834..0f522f8a7228 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -479,39 +479,35 @@ bool dcn32_set_mcm_luts( int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; bool result = true; - struct pwl_params *lut_params = NULL; + const struct pwl_params *lut_params = NULL; // 1D LUT - if (plane_state->blend_tf) { - if (plane_state->blend_tf->type == TF_TYPE_HWPWL) - lut_params = &plane_state->blend_tf->pwl; - else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) { - cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf, - &dpp_base->regamma_params, false); - lut_params = &dpp_base->regamma_params; - } + if (plane_state->blend_tf.type == TF_TYPE_HWPWL) + lut_params = &plane_state->blend_tf.pwl; + else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) { + cm3_helper_translate_curve_to_hw_format(&plane_state->blend_tf, + &dpp_base->regamma_params, false); + lut_params = &dpp_base->regamma_params; } result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id); lut_params = NULL; // Shaper - if (plane_state->in_shaper_func) { - if (plane_state->in_shaper_func->type == TF_TYPE_HWPWL) - lut_params = &plane_state->in_shaper_func->pwl; - else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) { - // TODO: dpp_base replace - ASSERT(false); - cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func, - &dpp_base->shaper_params, true); - lut_params = &dpp_base->shaper_params; - } + if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL) + lut_params = &plane_state->in_shaper_func.pwl; + else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) { + // TODO: dpp_base replace + ASSERT(false); + cm3_helper_translate_curve_to_hw_format(&plane_state->in_shaper_func, + &dpp_base->shaper_params, true); + lut_params = &dpp_base->shaper_params; } result = mpc->funcs->program_shaper(mpc, lut_params, mpcc_id); // 3D - if (plane_state->lut3d_func && plane_state->lut3d_func->state.bits.initialized == 1) - result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func->lut_3d, mpcc_id); + if (plane_state->lut3d_func.state.bits.initialized == 1) + result = mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id); else result = mpc->funcs->program_3dlut(mpc, NULL, mpcc_id); @@ -528,27 +524,24 @@ bool dcn32_set_input_transfer_func(struct dc *dc, enum 
dc_transfer_func_predefined tf; bool result = true; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; if (mpc == NULL || plane_state == NULL) return false; tf = TRANSFER_FUNCTION_UNITY; - if (plane_state->in_transfer_func && - plane_state->in_transfer_func->type == TF_TYPE_PREDEFINED) - tf = plane_state->in_transfer_func->tf; + if (plane_state->in_transfer_func.type == TF_TYPE_PREDEFINED) + tf = plane_state->in_transfer_func.tf; dpp_base->funcs->dpp_set_pre_degam(dpp_base, tf); - if (plane_state->in_transfer_func) { - if (plane_state->in_transfer_func->type == TF_TYPE_HWPWL) - params = &plane_state->in_transfer_func->pwl; - else if (plane_state->in_transfer_func->type == TF_TYPE_DISTRIBUTED_POINTS && - cm3_helper_translate_curve_to_hw_format(plane_state->in_transfer_func, - &dpp_base->degamma_params, false)) - params = &dpp_base->degamma_params; - } + if (plane_state->in_transfer_func.type == TF_TYPE_HWPWL) + params = &plane_state->in_transfer_func.pwl; + else if (plane_state->in_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && + cm3_helper_translate_curve_to_hw_format(&plane_state->in_transfer_func, + &dpp_base->degamma_params, false)) + params = &dpp_base->degamma_params; dpp_base->funcs->dpp_program_gamcor_lut(dpp_base, params); @@ -566,24 +559,24 @@ bool dcn32_set_output_transfer_func(struct dc *dc, { int mpcc_id = pipe_ctx->plane_res.hubp->inst; struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc; - struct pwl_params *params = NULL; + const struct pwl_params *params = NULL; bool ret = false; /* program OGAM or 3DLUT only for the top pipe*/ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD)) { /*program shaper and 3dlut in MPC*/ ret = dcn32_set_mpc_shaper_3dlut(pipe_ctx, stream); - if (ret == false && mpc->funcs->set_output_gamma && stream->out_transfer_func) { - if (stream->out_transfer_func->type == TF_TYPE_HWPWL) - params = &stream->out_transfer_func->pwl; - else if (pipe_ctx->stream->out_transfer_func->type == + if (ret == false && mpc->funcs->set_output_gamma) { + if (stream->out_transfer_func.type == TF_TYPE_HWPWL) + params = &stream->out_transfer_func.pwl; + else if (pipe_ctx->stream->out_transfer_func.type == TF_TYPE_DISTRIBUTED_POINTS && cm3_helper_translate_curve_to_hw_format( - stream->out_transfer_func, + &stream->out_transfer_func, &mpc->blender_params, false)) params = &mpc->blender_params; /* there are no ROM LUTs in OUTGAM */ - if (stream->out_transfer_func->type == TF_TYPE_PREDEFINED) + if (stream->out_transfer_func.type == TF_TYPE_PREDEFINED) BREAK_TO_DEBUGGER(); } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 65df90842b83..2368fad61b41 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -377,7 +377,10 @@ struct hw_sequencer_funcs { /* Idle Optimization Related */ bool (*apply_idle_power_optimizations)(struct dc *dc, bool enable); - bool (*does_plane_fit_in_mall)(struct dc *dc, struct dc_plane_state *plane, + bool (*does_plane_fit_in_mall)(struct dc *dc, + unsigned int pitch, + unsigned int height, + enum surface_pixel_format format, struct dc_cursor_attributes *cursor_attr); void (*commit_subvp_config)(struct dc *dc, struct dc_state *context); void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 0f24afbf4388..ca8de345d039 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -322,7 +322,7 @@ struct dpp_funcs { const struct pwl_params *params); bool (*dpp_program_3dlut)( struct dpp *dpp, - struct tetrahedral_params *params); + const struct tetrahedral_params *params); void (*dpp_cnv_set_alpha_keyer)( struct dpp *dpp_base, struct cnv_color_keyer_params *color_keyer); -- cgit v1.2.3-70-g09d2 From 5034b935f62a43ed669aea491ff1a86fd995df3a Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Fri, 15 Mar 2024 16:50:47 -0400 Subject: drm/amd/display: Modify DHCUB waterwark structures and functions [WHY&HOW] Converting the watermark set structure to a union and modifying some interfaces to accommodate future usage. Reviewed-by: Chaitanya Dhere Acked-by: Tom Chung Signed-off-by: Dillon Varone Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 8 ++++---- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 10 +++++----- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h | 2 +- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 8 ++++---- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h | 8 ++++---- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h | 2 +- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c | 8 ++++---- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c | 10 +++++----- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h | 8 ++++---- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c | 4 ++-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 16 ++++++++++------ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 12 +++++++----- 17 files changed, 56 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index d51f1ce02874..d9ade9ee0aeb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -242,7 +242,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub) bool hubbub1_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -356,7 +356,7 @@ bool hubbub1_program_urgent_watermarks( bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -501,7 +501,7 @@ bool hubbub1_program_stutter_watermarks( bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -582,7 +582,7 @@ bool hubbub1_program_pstate_watermarks( bool hubbub1_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index 4201b7627030..d1f9e63944c8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -409,7 +409,7 @@ struct dcn10_hubbub { const struct dcn_hubbub_shift *shifts; const struct dcn_hubbub_mask *masks; unsigned int debug_test_index_pstate; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; }; void hubbub1_update_dchub( @@ -423,7 +423,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub); bool hubbub1_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); @@ -446,17 +446,17 @@ void hubbub1_construct(struct hubbub *hubbub, bool hubbub1_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub1_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub1_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index 6eebcb22e317..c6f859871d11 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -570,7 +570,7 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub, static bool hubbub2_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 2f6146bf1d32..24a9c45988ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -85,7 +85,7 @@ struct dcn20_hubbub { const struct dcn_hubbub_shift *shifts; const struct dcn_hubbub_mask *masks; unsigned int debug_test_index_pstate; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; int num_vmid; struct dcn20_vmid vmid[16]; unsigned int detile_buf_size; diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c index 037d265431c6..09ea65acb2c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c @@ -52,7 +52,7 @@ static bool hubbub201_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index aeb0e0d9b70a..2546224b326a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -140,7 +140,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub, bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -334,7 +334,7 @@ bool hubbub21_program_urgent_watermarks( bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, 
unsigned int refclk_mhz, bool safe_to_lower) { @@ -487,7 +487,7 @@ bool hubbub21_program_stutter_watermarks( bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -573,7 +573,7 @@ bool hubbub21_program_pstate_watermarks( bool hubbub21_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index d8eb2bb7282c..ab2ce0313529 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -127,22 +127,22 @@ int hubbub21_init_dchub(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config); bool hubbub21_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub21_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c index 152c9c5733f1..6a5af3da4b45 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.c @@ -95,7 +95,7 @@ int hubbub3_init_dchub_sys_ctx(struct hubbub *hubbub, bool hubbub3_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h index 7b597908b937..ca6233e8f1f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubbub.h @@ -124,7 +124,7 @@ bool hubbub3_get_dcc_compression_cap(struct hubbub *hubbub, bool hubbub3_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c index 5b5b5e0775fa..b906db6e7355 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c @@ -172,7 +172,7 @@ static uint32_t convert_and_clamp( static bool hubbub31_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -362,7 +362,7 @@ static bool hubbub31_program_urgent_watermarks( static bool hubbub31_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -635,7 
+635,7 @@ static bool hubbub31_program_stutter_watermarks( static bool hubbub31_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -717,7 +717,7 @@ static bool hubbub31_program_pstate_watermarks( static bool hubbub31_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c index 88dfc907553d..515c4c2b4c21 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c @@ -167,7 +167,7 @@ static uint32_t convert_and_clamp( bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -357,7 +357,7 @@ bool hubbub32_program_urgent_watermarks( bool hubbub32_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -503,7 +503,7 @@ bool hubbub32_program_stutter_watermarks( bool hubbub32_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -656,7 +656,7 @@ bool hubbub32_program_pstate_watermarks( bool hubbub32_program_usr_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -750,7 +750,7 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow) static bool hubbub32_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h index f073839a4b6d..e439ba0fa30f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h @@ -118,25 +118,25 @@ bool hubbub32_program_urgent_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_stutter_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_pstate_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); bool hubbub32_program_usr_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c index 339bf0c722dd..6293173ba2b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubbub.c @@ -111,7 +111,7 @@ static uint32_t convert_and_clamp( static bool hubbub35_program_stutter_z8_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set 
*watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { @@ -297,7 +297,7 @@ static void hubbub35_get_dchub_ref_freq(struct hubbub *hubbub, static bool hubbub35_program_watermarks( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index 1530b1f8f8be..f983041ce9a4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -1910,9 +1910,11 @@ static void dcn20_program_pipe( dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub); } - if (dc->res_pool->hubbub->funcs->program_det_size && pipe_ctx->update_flags.bits.det_size) - dc->res_pool->hubbub->funcs->program_det_size( - dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + if (pipe_ctx->update_flags.bits.det_size) { + if (dc->res_pool->hubbub->funcs->program_det_size) + dc->res_pool->hubbub->funcs->program_det_size( + dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb); + } if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) dcn20_update_dchubp_dpp(dc, pipe_ctx, context); @@ -2073,9 +2075,11 @@ void dcn20_program_front_end_for_ctx( * turned on (i.e. in an MCLK switch) which can come in too late and cause issues with * DET allocation. */ - if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable || - (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) - hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); + if ((context->res_ctx.pipe_ctx[i].update_flags.bits.disable || + (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM))) { + if (hubbub->funcs->program_det_size) + hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0); + } hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]); DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index a66676c00c6c..3f6876aafee0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -499,7 +499,7 @@ struct dcn_bw_writeback { struct dcn_bw_output { struct dc_clocks clk; - struct dcn_watermark_set watermarks; + union dcn_watermark_set watermarks; struct dcn_bw_writeback bw_writeback; int compbuf_size_kb; unsigned int mall_ss_size_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index 2ae7484d18af..305fdc127bfc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -160,7 +160,7 @@ struct hubbub_funcs { bool (*program_watermarks)( struct hubbub *hubbub, - struct dcn_watermark_set *watermarks, + union dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index b72fb314d804..86c12cd6f47d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -50,11 +50,13 @@ struct dcn_watermarks { uint32_t usr_retraining_ns; }; -struct dcn_watermark_set { - struct dcn_watermarks a; - struct dcn_watermarks b; - struct dcn_watermarks c; - struct dcn_watermarks d; +union dcn_watermark_set { + struct { + struct dcn_watermarks a; + struct dcn_watermarks b; + struct dcn_watermarks c; + struct dcn_watermarks d; + }; // legacy }; struct dce_watermarks { -- cgit v1.2.3-70-g09d2 From 514e816d2239bfbb11b63a4e33c8882b11ad3d61 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 16 Mar 2024 22:01:29 -0400 Subject: drm/amd/display: [FW Promotion] Release 0.0.210.0 - Add Display PHY FSM command interface for automated testing Acked-by: Tom Chung Signed-off-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 39 ++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 995a54459335..3bd9911b6f3a 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -824,6 +824,10 @@ enum dmub_cmd_vbios_type { * Query DP alt status on a transmitter. */ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26, + /** + * Control PHY FSM + */ + DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM = 29, /** * Controls domain power gating */ @@ -2345,6 +2349,7 @@ enum dmub_phy_fsm_state { DMUB_PHY_FSM_POWER_DOWN, DMUB_PHY_FSM_PLL_EN, DMUB_PHY_FSM_TX_EN, + DMUB_PHY_FSM_TX_EN_TEST_MODE, DMUB_PHY_FSM_FAST_LP, DMUB_PHY_FSM_P2_PLL_OFF_CPM, DMUB_PHY_FSM_P2_PLL_OFF_PG, @@ -3494,7 +3499,7 @@ enum hw_lock_client { /** * Replay is the client of HW Lock Manager. */ - HW_LOCK_CLIENT_REPLAY = 4, + HW_LOCK_CLIENT_REPLAY = 4, /** * Invalid client. */ @@ -4188,6 +4193,34 @@ struct dmub_rb_cmd_transmitter_query_dp_alt { struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */ }; +struct phy_test_mode { + uint8_t mode; + uint8_t pat0; + uint8_t pad[2]; +}; + +/** + * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ +struct dmub_rb_cmd_transmitter_set_phy_fsm_data { + uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */ + uint8_t mode; /**< HDMI/DP/DP2 etc */ + uint8_t lane_num; /**< Number of lanes */ + uint32_t symclk_100Hz; /**< PLL symclock in 100hz */ + struct phy_test_mode test_mode; + enum dmub_phy_fsm_state state; + uint32_t status; + uint8_t pad; +}; + +/** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ +struct dmub_rb_cmd_transmitter_set_phy_fsm { + struct dmub_cmd_header header; /**< header */ + struct dmub_rb_cmd_transmitter_set_phy_fsm_data data; /**< payload */ +}; + /** * Maximum number of bytes a chunk sent to DMUB for parsing */ @@ -4558,6 +4591,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command. */ struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt; + /** + * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_SET_PHY_FSM command. + */ + struct dmub_rb_cmd_transmitter_set_phy_fsm set_phy_fsm; /** * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command. 
*/ -- cgit v1.2.3-70-g09d2 From af114efe8d24b5711cfbedf7180f2ac1a296c24b Mon Sep 17 00:00:00 2001 From: Muhammad Ahmed Date: Fri, 15 Mar 2024 18:30:26 -0400 Subject: drm/amd/display: Skip pipe if the pipe idx not set properly [why] Driver crashes when pipe idx not set properly [how] Add code to skip the pipe that idx not set properly Reviewed-by: Charlene Liu Acked-by: Tom Chung Signed-off-by: Muhammad Ahmed Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index ac2676bb9fcb..bedf1cd390df 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -327,6 +327,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id); } + if (dml_pipe_idx == 0xFFFFFFFF) + continue; ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id_valid[dml_pipe_idx]); ASSERT(in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_stream_id[dml_pipe_idx] == context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id); @@ -474,6 +476,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id); else dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id); + + if (dml_pipe_idx == 0xFFFFFFFF) + continue; total_det_allocated += dml_get_det_buffer_size_kbytes(&in_ctx->v20.dml_core_ctx, dml_pipe_idx); if (total_det_allocated > max_det_size) { need_recalculation = true; -- cgit v1.2.3-70-g09d2 From b5f524b3ebef1646cf3ec8003452ad889fc59243 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 17 Mar 2024 21:16:31 -0400 Subject: drm/amd/display: 3.2.278 This version brings along following fixes: - Fix some bound and NULL check - Fix nonseamless transition from ODM + MPO to ODM + subvp - Allow Z8 when stutter threshold is not met - Remove plane and stream pointers from dc scratch - Remove read/write to external register - Increase number of hpo dp link encoders - Increase clock table size - Add new IPS config mode - Build scaling params when a new plane is appended - Refactor DML2 interfaces - Allow idle opts for no flip case on PSR panel Acked-by: Tom Chung Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index dd54de5f3b2f..6300ae2ea1f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.277" +#define DC_VER "3.2.278" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From 1c5c36530a573de1a4b647b7d8c36f3b298e60ed Mon Sep 17 00:00:00 2001 From: Xi Liu Date: Tue, 27 Feb 2024 13:39:00 -0500 Subject: drm/amd/display: Set DCN351 BB and IP the same as DCN35 [WHY & HOW] DCN351 and DCN35 should use the same bounding box and IP settings. 
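For illustration only, a minimal standalone C sketch of the consolidation this change describes: both project ids share one switch arm so they always receive identical bounding-box values. The enum, struct and numbers below are simplified stand-ins taken from the DCN35 values in the diff that follows, not the actual DML code.

#include <stdio.h>

enum dml_project {
	DML_PROJECT_DCN35,
	DML_PROJECT_DCN351,
};

struct soc_bb {
	int num_chans;
	int round_trip_ping_latency_dcfclk_cycles;
	int smn_latency_us;
	int dispclk_dppclk_vco_speed_mhz;
};

static void init_socbb(enum dml_project project, struct soc_bb *out)
{
	switch (project) {
	case DML_PROJECT_DCN35:
	case DML_PROJECT_DCN351:	/* falls through: same settings as DCN35 */
		out->num_chans = 4;
		out->round_trip_ping_latency_dcfclk_cycles = 106;
		out->smn_latency_us = 2;
		out->dispclk_dppclk_vco_speed_mhz = 3600;
		break;
	}
}

int main(void)
{
	struct soc_bb bb = { 0 };

	init_socbb(DML_PROJECT_DCN351, &bb);
	printf("dcn351: num_chans=%d vco=%d MHz\n",
	       bb.num_chans, bb.dispclk_dppclk_vco_speed_mhz);
	return 0;
}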
Cc: Mario Limonciello Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Jun Lei Acked-by: Alex Hung Signed-off-by: Xi Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index cf98411d0799..151b480b3cea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -229,17 +229,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s break; case dml_project_dcn35: + case dml_project_dcn351: out->num_chans = 4; out->round_trip_ping_latency_dcfclk_cycles = 106; out->smn_latency_us = 2; out->dispclk_dppclk_vco_speed_mhz = 3600; break; - case dml_project_dcn351: - out->num_chans = 16; - out->round_trip_ping_latency_dcfclk_cycles = 1100; - out->smn_latency_us = 2; - break; } /* ---Overrides if available--- */ if (dml2->config.bbox_overrides.dram_num_chan) -- cgit v1.2.3-70-g09d2 From f3e698978cfb3c9ece9e60acf0860dafece345ff Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Thu, 7 Mar 2024 11:13:19 +0800 Subject: drm/amdgpu/umsch: update UMSCH 4.0 FW interface Align with FW changes. Signed-off-by: Lang Yu Reviewed-by: Veerabadhran Gopalakrishnan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h | 20 ++++++++++---------- drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h | 13 +++++++++++-- 2 files changed, 21 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h index 8258a43a6236..5014b5af95fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h @@ -33,13 +33,6 @@ enum UMSCH_SWIP_ENGINE_TYPE { UMSCH_SWIP_ENGINE_TYPE_MAX }; -enum UMSCH_SWIP_AFFINITY_TYPE { - UMSCH_SWIP_AFFINITY_TYPE_ANY = 0, - UMSCH_SWIP_AFFINITY_TYPE_VCN0 = 1, - UMSCH_SWIP_AFFINITY_TYPE_VCN1 = 2, - UMSCH_SWIP_AFFINITY_TYPE_MAX -}; - enum UMSCH_CONTEXT_PRIORITY_LEVEL { CONTEXT_PRIORITY_LEVEL_IDLE = 0, CONTEXT_PRIORITY_LEVEL_NORMAL = 1, @@ -51,13 +44,15 @@ enum UMSCH_CONTEXT_PRIORITY_LEVEL { struct umsch_mm_set_resource_input { uint32_t vmid_mask_mm_vcn; uint32_t vmid_mask_mm_vpe; + uint32_t collaboration_mask_vpe; uint32_t logging_vmid; uint32_t engine_mask; union { struct { uint32_t disable_reset : 1; uint32_t disable_umsch_mm_log : 1; - uint32_t reserved : 30; + uint32_t use_rs64mem_for_proc_ctx_csa : 1; + uint32_t reserved : 29; }; uint32_t uint32_all; }; @@ -78,15 +73,18 @@ struct umsch_mm_add_queue_input { uint32_t doorbell_offset_1; enum UMSCH_SWIP_ENGINE_TYPE engine_type; uint32_t affinity; - enum UMSCH_SWIP_AFFINITY_TYPE affinity_type; uint64_t mqd_addr; uint64_t h_context; uint64_t h_queue; uint32_t vm_context_cntl; + uint32_t process_csa_array_index; + uint32_t context_csa_array_index; + struct { uint32_t is_context_suspended : 1; - uint32_t reserved : 31; + uint32_t collaboration_mode : 1; + uint32_t reserved : 30; }; }; @@ -94,6 +92,7 @@ struct umsch_mm_remove_queue_input { uint32_t doorbell_offset_0; uint32_t doorbell_offset_1; uint64_t context_csa_addr; + uint32_t context_csa_array_index; }; struct MQD_INFO { @@ -103,6 +102,7 @@ struct MQD_INFO { uint32_t wptr_val; uint32_t rptr_val; 
uint32_t unmapped; + uint32_t vmid; }; struct amdgpu_umsch_mm; diff --git a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h index beadb9e42850..ca83e9e5c3ff 100644 --- a/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h +++ b/drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h @@ -234,7 +234,8 @@ union UMSCHAPI__SET_HW_RESOURCES { uint32_t enable_level_process_quantum_check : 1; uint32_t is_vcn0_enabled : 1; uint32_t is_vcn1_enabled : 1; - uint32_t reserved : 27; + uint32_t use_rs64mem_for_proc_ctx_csa : 1; + uint32_t reserved : 26; }; uint32_t uint32_all; }; @@ -297,9 +298,12 @@ union UMSCHAPI__ADD_QUEUE { struct { uint32_t is_context_suspended : 1; - uint32_t reserved : 31; + uint32_t collaboration_mode : 1; + uint32_t reserved : 30; }; struct UMSCH_API_STATUS api_status; + uint32_t process_csa_array_index; + uint32_t context_csa_array_index; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -314,6 +318,7 @@ union UMSCHAPI__REMOVE_QUEUE { uint64_t context_csa_addr; struct UMSCH_API_STATUS api_status; + uint32_t context_csa_array_index; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -337,6 +342,7 @@ union UMSCHAPI__SUSPEND { uint32_t suspend_fence_value; struct UMSCH_API_STATUS api_status; + uint32_t context_csa_array_index; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -356,6 +362,7 @@ union UMSCHAPI__RESUME { enum UMSCH_ENGINE_TYPE engine_type; struct UMSCH_API_STATUS api_status; + uint32_t context_csa_array_index; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -404,6 +411,7 @@ union UMSCHAPI__UPDATE_AFFINITY { union UMSCH_AFFINITY affinity; uint64_t context_csa_addr; struct UMSCH_API_STATUS api_status; + uint32_t context_csa_array_index; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; @@ -417,6 +425,7 @@ union UMSCHAPI__CHANGE_CONTEXT_PRIORITY_LEVEL { uint64_t context_quantum; uint64_t context_csa_addr; struct UMSCH_API_STATUS api_status; + uint32_t context_csa_array_index; }; uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; -- cgit v1.2.3-70-g09d2 From b9a8aee136b70d032f078de98ab6f8cc3eaebf3e Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Thu, 7 Mar 2024 13:57:06 +0800 Subject: drm/amdgpu: enable UMSCH 4.0.6 Share same codes with 4.0.5 and enable collaborate mode for VPE. 
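As a rough illustration of the collaborate-mode handling in the diff that follows: when two VPE instances cooperate, the queue descriptor is written twice, back to back, so each instance gets its own copy. The struct layout below is a simplified stand-in for the firmware MQD, not the driver's definition.

#include <stdio.h>
#include <string.h>

struct mqd_info {
	unsigned int wptr_val;
	unsigned int rptr_val;
	unsigned int unmapped;
};

static void setup_queue(struct mqd_info *mqd, int collaborate_mode)
{
	mqd->wptr_val = 0;
	mqd->rptr_val = 0;
	mqd->unmapped = 1;

	/* in collaborate mode the second instance's descriptor immediately
	 * follows the first one in the same buffer */
	if (collaborate_mode)
		memcpy(mqd + 1, mqd, sizeof(*mqd));
}

int main(void)
{
	struct mqd_info mqds[2];

	memset(mqds, 0, sizeof(mqds));
	setup_queue(mqds, 1);
	printf("second instance: wptr=%u unmapped=%u\n",
	       mqds[1].wptr_val, mqds[1].unmapped);
	return 0;
}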
Signed-off-by: Lang Yu Reviewed-by: Veerabadhran Gopalakrishnan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c | 12 ++++++++++-- drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 7 +++++-- 3 files changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 230412fc4d62..362da1413e1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -2246,6 +2246,7 @@ static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev) { switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(4, 0, 5): + case IP_VERSION(4, 0, 6): if (amdgpu_umsch_mm & 0x1) { amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block); adev->enable_umsch_mm = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c index ab820cf52668..0df97c3e3a70 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c @@ -189,10 +189,13 @@ static void setup_vpe_queue(struct amdgpu_device *adev, mqd->rptr_val = 0; mqd->unmapped = 1; + if (adev->vpe.collaborate_mode) + memcpy(++mqd, test->mqd_data_cpu_addr, sizeof(struct MQD_INFO)); + qinfo->mqd_addr = test->mqd_data_gpu_addr; qinfo->csa_addr = test->ctx_data_gpu_addr + offsetof(struct umsch_mm_test_ctx_data, vpe_ctx_csa); - qinfo->doorbell_offset_0 = (adev->doorbell_index.vpe_ring + 1) << 1; + qinfo->doorbell_offset_0 = 0; qinfo->doorbell_offset_1 = 0; } @@ -287,7 +290,10 @@ static int submit_vpe_queue(struct amdgpu_device *adev, struct umsch_mm_test *te ring[5] = 0; mqd->wptr_val = (6 << 2); - // WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val); + if (adev->vpe.collaborate_mode) + (++mqd)->wptr_val = (6 << 2); + + WDOORBELL32(adev->umsch_mm.agdb_index[CONTEXT_PRIORITY_LEVEL_NORMAL], mqd->wptr_val); for (i = 0; i < adev->usec_timeout; i++) { if (*fence == test_pattern) @@ -571,6 +577,7 @@ int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch) switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(4, 0, 5): + case IP_VERSION(4, 0, 6): fw_name = "amdgpu/umsch_mm_4_0_0.bin"; break; default: @@ -750,6 +757,7 @@ static int umsch_mm_early_init(void *handle) switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) { case IP_VERSION(4, 0, 5): + case IP_VERSION(4, 0, 6): umsch_mm_v4_0_set_funcs(&adev->umsch_mm); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c index 8e7b763cfdb7..84368cf1e175 100644 --- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c @@ -60,7 +60,7 @@ static int umsch_mm_v4_0_load_microcode(struct amdgpu_umsch_mm *umsch) umsch->cmd_buf_curr_ptr = umsch->cmd_buf_ptr; - if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) { + if (amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) { WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG, 1 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT); SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS, @@ -248,7 +248,7 @@ static int umsch_mm_v4_0_ring_stop(struct amdgpu_umsch_mm *umsch) data = REG_SET_FIELD(data, VCN_UMSCH_RB_DB_CTRL, EN, 0); WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_DB_CTRL, data); - if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 5)) { + if 
(amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 5)) { WREG32_SOC15(VCN, 0, regUVD_IPX_DLDO_CONFIG, 2 << UVD_IPX_DLDO_CONFIG__ONO0_PWR_CONFIG__SHIFT); SOC15_WAIT_ON_RREG(VCN, 0, regUVD_IPX_DLDO_STATUS, @@ -271,6 +271,8 @@ static int umsch_mm_v4_0_set_hw_resources(struct amdgpu_umsch_mm *umsch) set_hw_resources.vmid_mask_mm_vcn = umsch->vmid_mask_mm_vcn; set_hw_resources.vmid_mask_mm_vpe = umsch->vmid_mask_mm_vpe; + set_hw_resources.collaboration_mask_vpe = + adev->vpe.collaborate_mode ? 0x3 : 0x0; set_hw_resources.engine_mask = umsch->engine_mask; set_hw_resources.vcn0_hqd_mask[0] = umsch->vcn0_hqd_mask; @@ -346,6 +348,7 @@ static int umsch_mm_v4_0_add_queue(struct amdgpu_umsch_mm *umsch, add_queue.h_queue = input_ptr->h_queue; add_queue.vm_context_cntl = input_ptr->vm_context_cntl; add_queue.is_context_suspended = input_ptr->is_context_suspended; + add_queue.collaboration_mode = adev->vpe.collaborate_mode ? 1 : 0; add_queue.api_status.api_completion_fence_addr = umsch->ring.fence_drv.gpu_addr; add_queue.api_status.api_completion_fence_value = ++umsch->ring.fence_drv.sync_seq; -- cgit v1.2.3-70-g09d2 From c25d09bcb79fa6a6f45beb558ccc77e68f757e19 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 7 Mar 2024 17:07:37 -0500 Subject: drm/amdgpu: fix deadlock while reading mqd from debugfs An errant disk backup on my desktop got into debugfs and triggered the following deadlock scenario in the amdgpu debugfs files. The machine also hard-resets immediately after those lines are printed (although I wasn't able to reproduce that part when reading by hand): [ 1318.016074][ T1082] ====================================================== [ 1318.016607][ T1082] WARNING: possible circular locking dependency detected [ 1318.017107][ T1082] 6.8.0-rc7-00015-ge0c8221b72c0 #17 Not tainted [ 1318.017598][ T1082] ------------------------------------------------------ [ 1318.018096][ T1082] tar/1082 is trying to acquire lock: [ 1318.018585][ T1082] ffff98c44175d6a0 (&mm->mmap_lock){++++}-{3:3}, at: __might_fault+0x40/0x80 [ 1318.019084][ T1082] [ 1318.019084][ T1082] but task is already holding lock: [ 1318.020052][ T1082] ffff98c4c13f55f8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: amdgpu_debugfs_mqd_read+0x6a/0x250 [amdgpu] [ 1318.020607][ T1082] [ 1318.020607][ T1082] which lock already depends on the new lock. 
[ 1318.020607][ T1082] [ 1318.022081][ T1082] [ 1318.022081][ T1082] the existing dependency chain (in reverse order) is: [ 1318.023083][ T1082] [ 1318.023083][ T1082] -> #2 (reservation_ww_class_mutex){+.+.}-{3:3}: [ 1318.024114][ T1082] __ww_mutex_lock.constprop.0+0xe0/0x12f0 [ 1318.024639][ T1082] ww_mutex_lock+0x32/0x90 [ 1318.025161][ T1082] dma_resv_lockdep+0x18a/0x330 [ 1318.025683][ T1082] do_one_initcall+0x6a/0x350 [ 1318.026210][ T1082] kernel_init_freeable+0x1a3/0x310 [ 1318.026728][ T1082] kernel_init+0x15/0x1a0 [ 1318.027242][ T1082] ret_from_fork+0x2c/0x40 [ 1318.027759][ T1082] ret_from_fork_asm+0x11/0x20 [ 1318.028281][ T1082] [ 1318.028281][ T1082] -> #1 (reservation_ww_class_acquire){+.+.}-{0:0}: [ 1318.029297][ T1082] dma_resv_lockdep+0x16c/0x330 [ 1318.029790][ T1082] do_one_initcall+0x6a/0x350 [ 1318.030263][ T1082] kernel_init_freeable+0x1a3/0x310 [ 1318.030722][ T1082] kernel_init+0x15/0x1a0 [ 1318.031168][ T1082] ret_from_fork+0x2c/0x40 [ 1318.031598][ T1082] ret_from_fork_asm+0x11/0x20 [ 1318.032011][ T1082] [ 1318.032011][ T1082] -> #0 (&mm->mmap_lock){++++}-{3:3}: [ 1318.032778][ T1082] __lock_acquire+0x14bf/0x2680 [ 1318.033141][ T1082] lock_acquire+0xcd/0x2c0 [ 1318.033487][ T1082] __might_fault+0x58/0x80 [ 1318.033814][ T1082] amdgpu_debugfs_mqd_read+0x103/0x250 [amdgpu] [ 1318.034181][ T1082] full_proxy_read+0x55/0x80 [ 1318.034487][ T1082] vfs_read+0xa7/0x360 [ 1318.034788][ T1082] ksys_read+0x70/0xf0 [ 1318.035085][ T1082] do_syscall_64+0x94/0x180 [ 1318.035375][ T1082] entry_SYSCALL_64_after_hwframe+0x46/0x4e [ 1318.035664][ T1082] [ 1318.035664][ T1082] other info that might help us debug this: [ 1318.035664][ T1082] [ 1318.036487][ T1082] Chain exists of: [ 1318.036487][ T1082] &mm->mmap_lock --> reservation_ww_class_acquire --> reservation_ww_class_mutex [ 1318.036487][ T1082] [ 1318.037310][ T1082] Possible unsafe locking scenario: [ 1318.037310][ T1082] [ 1318.037838][ T1082] CPU0 CPU1 [ 1318.038101][ T1082] ---- ---- [ 1318.038350][ T1082] lock(reservation_ww_class_mutex); [ 1318.038590][ T1082] lock(reservation_ww_class_acquire); [ 1318.038839][ T1082] lock(reservation_ww_class_mutex); [ 1318.039083][ T1082] rlock(&mm->mmap_lock); [ 1318.039328][ T1082] [ 1318.039328][ T1082] *** DEADLOCK *** [ 1318.039328][ T1082] [ 1318.040029][ T1082] 1 lock held by tar/1082: [ 1318.040259][ T1082] #0: ffff98c4c13f55f8 (reservation_ww_class_mutex){+.+.}-{3:3}, at: amdgpu_debugfs_mqd_read+0x6a/0x250 [amdgpu] [ 1318.040560][ T1082] [ 1318.040560][ T1082] stack backtrace: [ 1318.041053][ T1082] CPU: 22 PID: 1082 Comm: tar Not tainted 6.8.0-rc7-00015-ge0c8221b72c0 #17 3316c85d50e282c5643b075d1f01a4f6365e39c2 [ 1318.041329][ T1082] Hardware name: Gigabyte Technology Co., Ltd. B650 AORUS PRO AX/B650 AORUS PRO AX, BIOS F20 12/14/2023 [ 1318.041614][ T1082] Call Trace: [ 1318.041895][ T1082] [ 1318.042175][ T1082] dump_stack_lvl+0x4a/0x80 [ 1318.042460][ T1082] check_noncircular+0x145/0x160 [ 1318.042743][ T1082] __lock_acquire+0x14bf/0x2680 [ 1318.043022][ T1082] lock_acquire+0xcd/0x2c0 [ 1318.043301][ T1082] ? __might_fault+0x40/0x80 [ 1318.043580][ T1082] ? __might_fault+0x40/0x80 [ 1318.043856][ T1082] __might_fault+0x58/0x80 [ 1318.044131][ T1082] ? 
__might_fault+0x40/0x80 [ 1318.044408][ T1082] amdgpu_debugfs_mqd_read+0x103/0x250 [amdgpu 8fe2afaa910cbd7654c8cab23563a94d6caebaab] [ 1318.044749][ T1082] full_proxy_read+0x55/0x80 [ 1318.045042][ T1082] vfs_read+0xa7/0x360 [ 1318.045333][ T1082] ksys_read+0x70/0xf0 [ 1318.045623][ T1082] do_syscall_64+0x94/0x180 [ 1318.045913][ T1082] ? do_syscall_64+0xa0/0x180 [ 1318.046201][ T1082] ? lockdep_hardirqs_on+0x7d/0x100 [ 1318.046487][ T1082] ? do_syscall_64+0xa0/0x180 [ 1318.046773][ T1082] ? do_syscall_64+0xa0/0x180 [ 1318.047057][ T1082] ? do_syscall_64+0xa0/0x180 [ 1318.047337][ T1082] ? do_syscall_64+0xa0/0x180 [ 1318.047611][ T1082] entry_SYSCALL_64_after_hwframe+0x46/0x4e [ 1318.047887][ T1082] RIP: 0033:0x7f480b70a39d [ 1318.048162][ T1082] Code: 91 ba 0d 00 f7 d8 64 89 02 b8 ff ff ff ff eb b2 e8 18 a3 01 00 0f 1f 84 00 00 00 00 00 80 3d a9 3c 0e 00 00 74 17 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 5b c3 66 2e 0f 1f 84 00 00 00 00 00 53 48 83 [ 1318.048769][ T1082] RSP: 002b:00007ffde77f5c68 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 [ 1318.049083][ T1082] RAX: ffffffffffffffda RBX: 0000000000000800 RCX: 00007f480b70a39d [ 1318.049392][ T1082] RDX: 0000000000000800 RSI: 000055c9f2120c00 RDI: 0000000000000008 [ 1318.049703][ T1082] RBP: 0000000000000800 R08: 000055c9f2120a94 R09: 0000000000000007 [ 1318.050011][ T1082] R10: 0000000000000000 R11: 0000000000000246 R12: 000055c9f2120c00 [ 1318.050324][ T1082] R13: 0000000000000008 R14: 0000000000000008 R15: 0000000000000800 [ 1318.050638][ T1082] amdgpu_debugfs_mqd_read() holds a reservation when it calls put_user(), which may fault and acquire the mmap_sem. This violates the established locking order. Bounce the mqd data through a kernel buffer to get put_user() out of the illegal section. Fixes: 445d85e3c1df ("drm/amdgpu: add debugfs interface for reading MQDs") Cc: stable@vger.kernel.org # v6.5+ Reviewed-by: Shashank Sharma Signed-off-by: Johannes Weiner Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 46 ++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 5505d646f43a..06f0a6534a94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -524,46 +524,58 @@ static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf, { struct amdgpu_ring *ring = file_inode(f)->i_private; volatile u32 *mqd; - int r; + u32 *kbuf; + int r, i; uint32_t value, result; if (*pos & 3 || size & 3) return -EINVAL; - result = 0; + kbuf = kmalloc(ring->mqd_size, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; r = amdgpu_bo_reserve(ring->mqd_obj, false); if (unlikely(r != 0)) - return r; + goto err_free; r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); - if (r) { - amdgpu_bo_unreserve(ring->mqd_obj); - return r; - } + if (r) + goto err_unreserve; + /* + * Copy to local buffer to avoid put_user(), which might fault + * and acquire mmap_sem, under reservation_ww_class_mutex. 
+ */ + for (i = 0; i < ring->mqd_size/sizeof(u32); i++) + kbuf[i] = mqd[i]; + + amdgpu_bo_kunmap(ring->mqd_obj); + amdgpu_bo_unreserve(ring->mqd_obj); + + result = 0; while (size) { if (*pos >= ring->mqd_size) - goto done; + break; - value = mqd[*pos/4]; + value = kbuf[*pos/4]; r = put_user(value, (uint32_t *)buf); if (r) - goto done; + goto err_free; buf += 4; result += 4; size -= 4; *pos += 4; } -done: - amdgpu_bo_kunmap(ring->mqd_obj); - mqd = NULL; - amdgpu_bo_unreserve(ring->mqd_obj); - if (r) - return r; - + kfree(kbuf); return result; + +err_unreserve: + amdgpu_bo_unreserve(ring->mqd_obj); +err_free: + kfree(kbuf); + return r; } static const struct file_operations amdgpu_debugfs_mqd_fops = { -- cgit v1.2.3-70-g09d2 From d7e8ddc392a3659206e7973e75c47849f2c8b72a Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Tue, 26 Mar 2024 15:32:46 -0400 Subject: drm/amdkfd: Reset GPU on queue preemption failure Currently, with F32 HWS GPU reset is only when unmap queue fails. However, if compute queue doesn't repond to preemption request in time unmap will return without any error. In this case, only preemption error is logged and Reset is not triggered. Call GPU reset in this case also. Reviewed-by: Alex Deucher Signed-off-by: Harish Kasiviswanathan Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 151fabf84040..c08b6ee25289 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2000,6 +2000,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (mqd_mgr->check_preemption_failed(mqd_mgr, dqm->packet_mgr.priv_queue->queue->mqd)) { while (halt_if_hws_hang) schedule(); + kfd_hws_hang(dqm); return -ETIME; } -- cgit v1.2.3-70-g09d2 From d7f1487643552f18d1855474eba54a8ff3655935 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sat, 23 Mar 2024 20:46:53 -0400 Subject: drm/amdgpu: always force full reset for SOC21 There are cases where soft reset seems to succeed, but does not, so always use mode1/2 for now. Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc21.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 581a3bd11481..8526282f4da1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev) { switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): - return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): - return false; default: return true; } -- cgit v1.2.3-70-g09d2 From 2ea6f4d94d0d215798eb1a623f43e5e0fb526f4a Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Tue, 26 Mar 2024 16:54:03 +0800 Subject: drm/amdgpu: make amdgpu device attr_update() function more efficient v1: add a new enumeration type to identify device attribute node, this method is relatively more efficient compared with 'strcmp' in update_attr() function. v2: rename device_attr_type to device_attr_id. 
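A minimal userspace sketch of the dispatch change: identifying an attribute by a pre-assigned enum id instead of strcmp() on its name turns a chain of string compares into a single integer comparison. The names and ids below are illustrative stand-ins, not the driver's full attribute table.

#include <stdio.h>
#include <string.h>

enum attr_id {
	ATTR_ID_UNKNOWN = -1,
	ATTR_ID_PP_DPM_SCLK,
	ATTR_ID_PP_DPM_MCLK,
	ATTR_ID_COUNT,
};

struct device_attr {
	const char *name;	/* still used to name the sysfs file */
	enum attr_id id;	/* now used to identify the node */
};

/* old style: one strcmp() per candidate name, cost grows with the list */
static int update_by_name(const struct device_attr *a)
{
	if (!strcmp(a->name, "pp_dpm_sclk"))
		return 1;
	if (!strcmp(a->name, "pp_dpm_mclk"))
		return 2;
	return 0;
}

/* new style: a single integer comparison (or switch) on the enum id */
static int update_by_id(const struct device_attr *a)
{
	switch (a->id) {
	case ATTR_ID_PP_DPM_SCLK:
		return 1;
	case ATTR_ID_PP_DPM_MCLK:
		return 2;
	default:
		return 0;
	}
}

int main(void)
{
	struct device_attr a = { "pp_dpm_mclk", ATTR_ID_PP_DPM_MCLK };

	printf("by name: %d, by id: %d\n", update_by_name(&a), update_by_id(&a));
	return 0;
}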
Signed-off-by: Yang Wang Reviewed-by: Ma Jun Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 4 ++-- drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h | 41 ++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 85e935556d7d..b410df28ccb2 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2226,16 +2226,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ uint32_t mask, enum amdgpu_device_attr_states *states) { struct device_attribute *dev_attr = &attr->dev_attr; + enum amdgpu_device_attr_id attr_id = attr->attr_id; uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); - const char *attr_name = dev_attr->attr.name; if (!(attr->flags & mask)) { *states = ATTR_STATE_UNSUPPORTED; return 0; } -#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name)) +#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name) if (DEVICE_ATTR_IS(pp_dpm_socclk)) { if (gc_ver < IP_VERSION(9, 0, 0)) diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h index eec816f0cbf9..448ba3a14584 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h @@ -43,8 +43,48 @@ enum amdgpu_device_attr_states { ATTR_STATE_SUPPORTED, }; +enum amdgpu_device_attr_id { + device_attr_id__unknown = -1, + device_attr_id__power_dpm_state = 0, + device_attr_id__power_dpm_force_performance_level, + device_attr_id__pp_num_states, + device_attr_id__pp_cur_state, + device_attr_id__pp_force_state, + device_attr_id__pp_table, + device_attr_id__pp_dpm_sclk, + device_attr_id__pp_dpm_mclk, + device_attr_id__pp_dpm_socclk, + device_attr_id__pp_dpm_fclk, + device_attr_id__pp_dpm_vclk, + device_attr_id__pp_dpm_vclk1, + device_attr_id__pp_dpm_dclk, + device_attr_id__pp_dpm_dclk1, + device_attr_id__pp_dpm_dcefclk, + device_attr_id__pp_dpm_pcie, + device_attr_id__pp_sclk_od, + device_attr_id__pp_mclk_od, + device_attr_id__pp_power_profile_mode, + device_attr_id__pp_od_clk_voltage, + device_attr_id__gpu_busy_percent, + device_attr_id__mem_busy_percent, + device_attr_id__vcn_busy_percent, + device_attr_id__pcie_bw, + device_attr_id__pp_features, + device_attr_id__unique_id, + device_attr_id__thermal_throttling_logging, + device_attr_id__apu_thermal_cap, + device_attr_id__gpu_metrics, + device_attr_id__smartshift_apu_power, + device_attr_id__smartshift_dgpu_power, + device_attr_id__smartshift_bias, + device_attr_id__xgmi_plpd_policy, + device_attr_id__pm_metrics, + device_attr_id__count, +}; + struct amdgpu_device_attr { struct device_attribute dev_attr; + enum amdgpu_device_attr_id attr_id; enum amdgpu_device_attr_flags flags; int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states); @@ -61,6 +101,7 @@ struct amdgpu_device_attr_entry { #define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) 
\ { .dev_attr = __ATTR(_name, _mode, _show, _store), \ + .attr_id = device_attr_id__##_name, \ .flags = _flags, \ ##__VA_ARGS__, } -- cgit v1.2.3-70-g09d2 From cd409dbc6986bc5f9bfacbbc968886531d47d5ce Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 21 Mar 2024 17:46:36 +0530 Subject: drm/amdgpu: Refine IB schedule error logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Downgrade to debug information when IBs are skipped. Also, use dev_* to identify the device. Signed-off-by: Lijo Lazar Reviewed-by: Christian König Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4b3000c21ef2..e4742b65032d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) dma_fence_set_error(finished, -ECANCELED); if (finished->error < 0) { - DRM_INFO("Skip scheduling IBs!\n"); + dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)", + ring->name); } else { r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, &fence); if (r) - DRM_ERROR("Error scheduling IBs (%d)\n", r); + dev_err(adev->dev, + "Error scheduling IBs (%d) in ring(%s)", r, + ring->name); } job->job_run_counter++; -- cgit v1.2.3-70-g09d2 From 108ab31be9d5c6efdcf522577d6f3da2a9b34056 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Mon, 25 Mar 2024 13:24:31 +0800 Subject: drm/amdgpu/umsch: reinitialize write pointer in hw init Otherwise the old one will be used during GPU reset. That's not expected. Signed-off-by: Lang Yu Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c index 84368cf1e175..bd57896ab85d 100644 --- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c @@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch) WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size); + ring->wptr = 0; + data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK); WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data); -- cgit v1.2.3-70-g09d2 From 6a0e1bafd70fe5c5d8dcf7d2ddff9377701343c3 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Tue, 26 Mar 2024 22:59:17 +0530 Subject: drm/amdgpu: add IP's FW information to devcoredump Add FW information of all the IP's in the devcoredump. 
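One of the fields the new dump decodes is the packed SMC firmware version; the standalone sketch below reproduces that byte-wise decode with a made-up sample value (in the driver the value comes from adev->pm.fw_version, as shown in the diff that follows).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t version = 0x04521200;	/* made-up stand-in for adev->pm.fw_version */
	unsigned int smu_program = (version >> 24) & 0xff;
	unsigned int smu_major = (version >> 16) & 0xff;
	unsigned int smu_minor = (version >> 8) & 0xff;
	unsigned int smu_debug = (version >> 0) & 0xff;

	printf("SMC program: %u, fw version: 0x%08x (%u.%u.%u)\n",
	       smu_program, version, smu_major, smu_minor, smu_debug);
	return 0;
}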
Signed-off-by: Sunil Khatri Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 129 +++++++++++++++++++++++ 1 file changed, 129 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 44c5da8aa9ce..1129e5e5fb42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -25,6 +25,7 @@ #include #include #include "amdgpu_dev_coredump.h" +#include "atom.h" #ifndef CONFIG_DEV_COREDUMP void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost, @@ -69,6 +70,130 @@ const char *hw_ip_names[MAX_HWIP] = { [PCIE_HWIP] = "PCIE", }; +static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev, + struct drm_printer *p) +{ + uint32_t version; + uint32_t feature; + uint8_t smu_program, smu_major, smu_minor, smu_debug; + struct atom_context *ctx = adev->mode_info.atom_context; + + drm_printf(p, "VCE feature version: %u, fw version: 0x%08x\n", + adev->vce.fb_version, adev->vce.fw_version); + drm_printf(p, "UVD feature version: %u, fw version: 0x%08x\n", 0, + adev->uvd.fw_version); + drm_printf(p, "GMC feature version: %u, fw version: 0x%08x\n", 0, + adev->gmc.fw_version); + drm_printf(p, "ME feature version: %u, fw version: 0x%08x\n", + adev->gfx.me_feature_version, adev->gfx.me_fw_version); + drm_printf(p, "PFP feature version: %u, fw version: 0x%08x\n", + adev->gfx.pfp_feature_version, adev->gfx.pfp_fw_version); + drm_printf(p, "CE feature version: %u, fw version: 0x%08x\n", + adev->gfx.ce_feature_version, adev->gfx.ce_fw_version); + drm_printf(p, "RLC feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_feature_version, adev->gfx.rlc_fw_version); + + drm_printf(p, "RLC SRLC feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_srlc_feature_version, + adev->gfx.rlc_srlc_fw_version); + drm_printf(p, "RLC SRLG feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_srlg_feature_version, + adev->gfx.rlc_srlg_fw_version); + drm_printf(p, "RLC SRLS feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlc_srls_feature_version, + adev->gfx.rlc_srls_fw_version); + drm_printf(p, "RLCP feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlcp_ucode_feature_version, + adev->gfx.rlcp_ucode_version); + drm_printf(p, "RLCV feature version: %u, fw version: 0x%08x\n", + adev->gfx.rlcv_ucode_feature_version, + adev->gfx.rlcv_ucode_version); + drm_printf(p, "MEC feature version: %u, fw version: 0x%08x\n", + adev->gfx.mec_feature_version, adev->gfx.mec_fw_version); + + if (adev->gfx.mec2_fw) + drm_printf(p, "MEC2 feature version: %u, fw version: 0x%08x\n", + adev->gfx.mec2_feature_version, + adev->gfx.mec2_fw_version); + + drm_printf(p, "IMU feature version: %u, fw version: 0x%08x\n", 0, + adev->gfx.imu_fw_version); + drm_printf(p, "PSP SOS feature version: %u, fw version: 0x%08x\n", + adev->psp.sos.feature_version, adev->psp.sos.fw_version); + drm_printf(p, "PSP ASD feature version: %u, fw version: 0x%08x\n", + adev->psp.asd_context.bin_desc.feature_version, + adev->psp.asd_context.bin_desc.fw_version); + + drm_printf(p, "TA XGMI feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.xgmi_context.context.bin_desc.feature_version, + adev->psp.xgmi_context.context.bin_desc.fw_version); + drm_printf(p, "TA RAS feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.ras_context.context.bin_desc.feature_version, + 
adev->psp.ras_context.context.bin_desc.fw_version); + drm_printf(p, "TA HDCP feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.hdcp_context.context.bin_desc.feature_version, + adev->psp.hdcp_context.context.bin_desc.fw_version); + drm_printf(p, "TA DTM feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.dtm_context.context.bin_desc.feature_version, + adev->psp.dtm_context.context.bin_desc.fw_version); + drm_printf(p, "TA RAP feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.rap_context.context.bin_desc.feature_version, + adev->psp.rap_context.context.bin_desc.fw_version); + drm_printf(p, + "TA SECURE DISPLAY feature version: 0x%08x, fw version: 0x%08x\n", + adev->psp.securedisplay_context.context.bin_desc.feature_version, + adev->psp.securedisplay_context.context.bin_desc.fw_version); + + /* SMC firmware */ + version = adev->pm.fw_version; + + smu_program = (version >> 24) & 0xff; + smu_major = (version >> 16) & 0xff; + smu_minor = (version >> 8) & 0xff; + smu_debug = (version >> 0) & 0xff; + drm_printf(p, + "SMC feature version: %u, program: %d, fw version: 0x%08x (%d.%d.%d)\n", + 0, smu_program, version, smu_major, smu_minor, smu_debug); + + /* SDMA firmware */ + for (int i = 0; i < adev->sdma.num_instances; i++) { + drm_printf(p, + "SDMA%d feature version: %u, firmware version: 0x%08x\n", + i, adev->sdma.instance[i].feature_version, + adev->sdma.instance[i].fw_version); + } + + drm_printf(p, "VCN feature version: %u, fw version: 0x%08x\n", 0, + adev->vcn.fw_version); + drm_printf(p, "DMCU feature version: %u, fw version: 0x%08x\n", 0, + adev->dm.dmcu_fw_version); + drm_printf(p, "DMCUB feature version: %u, fw version: 0x%08x\n", 0, + adev->dm.dmcub_fw_version); + drm_printf(p, "PSP TOC feature version: %u, fw version: 0x%08x\n", + adev->psp.toc.feature_version, adev->psp.toc.fw_version); + + version = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK; + feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) >> + AMDGPU_MES_FEAT_VERSION_SHIFT; + drm_printf(p, "MES_KIQ feature version: %u, fw version: 0x%08x\n", + feature, version); + + version = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; + feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) >> + AMDGPU_MES_FEAT_VERSION_SHIFT; + drm_printf(p, "MES feature version: %u, fw version: 0x%08x\n", feature, + version); + + drm_printf(p, "VPE feature version: %u, fw version: 0x%08x\n", + adev->vpe.feature_version, adev->vpe.fw_version); + + drm_printf(p, "\nVBIOS Information\n"); + drm_printf(p, "name: %s\n", ctx->name); + drm_printf(p, "pn %s\n", ctx->vbios_pn); + drm_printf(p, "version: %s\n", ctx->vbios_ver_str); + drm_printf(p, "date: %s\n", ctx->date); +} + static ssize_t amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) @@ -118,6 +243,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count, } } + /* IP firmware information */ + drm_printf(&p, "\nIP Firmwares\n"); + amdgpu_devcoredump_fw_info(coredump->adev, &p); + if (coredump->ring) { drm_printf(&p, "\nRing timed out details\n"); drm_printf(&p, "IP Type: %d Ring Name: %s\n", -- cgit v1.2.3-70-g09d2 From df3c7dc5c58b1f85033d2cd9a121b27844700ca2 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 14 Feb 2024 17:55:54 +0530 Subject: drm/amdgpu: Reset dGPU if suspend got aborted For SOC21 ASICs, there is an issue in re-enabling PM features if a suspend got aborted. In such cases, reset the device during resume phase. This is a workaround till a proper solution is finalized. 
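The detection logic in the patch below reduces to sampling a firmware scratch register twice and treating a changed value as "trusted OS still active", i.e. the suspend was aborted and a reset is needed on resume. The sketch stubs out the register read to show only the control flow; it is not the driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in for RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81) */
static uint32_t read_sol_scratch(void)
{
	static uint32_t fake = 0x1000;

	fake += 0x10;	/* a moving value means the trusted OS is still running */
	return fake;
}

static bool need_reset_on_resume(bool is_apu, bool in_s3, bool suspend_complete)
{
	uint32_t sol_reg1, sol_reg2;

	if (is_apu || !in_s3 || suspend_complete)
		return false;

	sol_reg1 = read_sol_scratch();
	usleep(100 * 1000);	/* the driver uses msleep(100) here */
	sol_reg2 = read_sol_scratch();

	return sol_reg1 != sol_reg2;	/* changed => suspend was aborted */
}

int main(void)
{
	printf("reset needed: %d\n", need_reset_on_resume(false, true, false));
	return 0;
}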
Signed-off-by: Lijo Lazar Reviewed-by: Alex Deucher Reviewed-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/soc21.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 8526282f4da1..abe319b0f063 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -867,10 +867,35 @@ static int soc21_common_suspend(void *handle) return soc21_common_hw_fini(adev); } +static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg1, sol_reg2; + + /* Will reset for the following suspend abort cases. + * 1) Only reset dGPU side. + * 2) S3 suspend got aborted and TOS is active. + */ + if (!(adev->flags & AMD_IS_APU) && adev->in_s3 && + !adev->suspend_complete) { + sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + msleep(100); + sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + + return (sol_reg1 != sol_reg2); + } + + return false; +} + static int soc21_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc21_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend aborted, resetting..."); + soc21_asic_reset(adev); + } + return soc21_common_hw_init(adev); } -- cgit v1.2.3-70-g09d2 From 2dbe9c2b2685b7e3ad7e1fb49fd2b7c65ca2c9c6 Mon Sep 17 00:00:00 2001 From: Li Ma Date: Thu, 28 Mar 2024 10:55:10 +0800 Subject: drm/amd/display: add DCN 351 version for microcode load There is a new DCN veriosn 3.5.1 need to load Signed-off-by: Li Ma Reviewed-by: Yifan Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 39d0fcc10aea..5336d3615580 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); +#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); + /* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 @@ -4820,9 +4823,11 @@ static int dm_init_microcode(struct amdgpu_device *adev) fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; case IP_VERSION(3, 5, 0): - case IP_VERSION(3, 5, 1): fw_name_dmub = FIRMWARE_DCN_35_DMUB; break; + case IP_VERSION(3, 5, 1): + fw_name_dmub = FIRMWARE_DCN_351_DMUB; + break; default: /* ASIC doesn't support DMUB. 
*/ return 0; -- cgit v1.2.3-70-g09d2 From 81d96e8b5a85e3ebb85342525aab02e89bec72dc Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 28 Mar 2024 13:46:14 +0800 Subject: drm/amdgpu: refine function signature of amdgpu_aca_get_error_data() refine function signature of amdgpu_aca_get_error_data(); Signed-off-by: Yang Wang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 8 +++----- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 6 +++++- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index c3242dbf8355..c02634a124dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -525,10 +525,9 @@ static bool aca_handle_is_valid(struct aca_handle *handle) } int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, - enum aca_error_type type, void *data, void *qctx) + enum aca_error_type type, struct ras_err_data *err_data, + struct ras_query_context *qctx) { - struct ras_err_data *err_data = (struct ras_err_data *)data; - if (!handle || !err_data) return -EINVAL; @@ -538,8 +537,7 @@ int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *han if (!(BIT(type) & handle->mask)) return 0; - return __aca_get_error_data(adev, handle, type, err_data, - (struct ras_query_context *)qctx); + return __aca_get_error_data(adev, handle, type, err_data, qctx); } static void aca_error_init(struct aca_error *aerr, enum aca_error_type type) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 247968d6a925..3765843ea648 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -26,6 +26,9 @@ #include +struct ras_err_data; +struct ras_query_context; + #define ACA_MAX_REGS_COUNT (16) #define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l) @@ -198,7 +201,8 @@ int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle, const char *name, const struct aca_info *aca_info, void *data); void amdgpu_aca_remove_handle(struct aca_handle *handle); int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, - enum aca_error_type type, void *data, void *qctx); + enum aca_error_type type, struct ras_err_data *err_data, + struct ras_query_context *qctx); int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en); void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root); int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info, -- cgit v1.2.3-70-g09d2 From fedb6ae49758658bd4a90689c26dc3c3a3f9c3cd Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Wed, 27 Mar 2024 13:10:37 +0800 Subject: drm/amd/pm: fixes a random hang in S4 for SMU v13.0.4/11 While doing multiple S4 stress tests, GC/RLC/PMFW get into an invalid state resulting into hard hangs. Adding a GFX reset as workaround just before sending the MP1_UNLOAD message avoids this failure. 
Signed-off-by: Tim Huang Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index e8119918ef6b..88f1a0d878f3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && !adev->in_s0ix) + if (!en && !adev->in_s0ix) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. + */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); + } return ret; } -- cgit v1.2.3-70-g09d2 From e58acb7613aa5e4be59c05226373a40dc7c25dc8 Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Fri, 22 Mar 2024 12:25:16 -0400 Subject: drm/amdgpu : Add mes_log_enable to control mes log feature The MES log might slow down the performance for extra step of log the data, disable it by default and introduce a parameter can enable it when necessary Signed-off-by: shaoyunl Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 7 +++++-- 4 files changed, 20 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9c62552bec34..b3b84647207e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring; extern int amdgpu_mcbp; extern int amdgpu_discovery; extern int amdgpu_mes; +extern int amdgpu_mes_log_enable; extern int amdgpu_mes_kiq; extern int amdgpu_noretry; extern int amdgpu_force_asic_type; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 80b9642f2bc4..e4277298cf1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1; int amdgpu_mcbp = -1; int amdgpu_discovery = -1; int amdgpu_mes; +int amdgpu_mes_log_enable = 0; int amdgpu_mes_kiq; int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; @@ -667,6 +668,15 @@ MODULE_PARM_DESC(mes, "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)"); module_param_named(mes, amdgpu_mes, int, 0444); +/** + * DOC: mes_log_enable (int) + * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log. + * (0 = disabled (default), 1 = enabled) + */ +MODULE_PARM_DESC(mes_log_enable, + "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)"); +module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444); + /** * DOC: mes_kiq (int) * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index e4fcc950e542..281efcd3ba8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -100,6 +100,9 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) { int r; + if (!amdgpu_mes_log_enable) + return 0; + r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->mes.event_log_gpu_obj, @@ -1561,7 +1564,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; - if (adev->enable_mes) + if (adev->enable_mes && amdgpu_mes_log_enable) debugfs_create_file("amdgpu_mes_event_log", 0444, root, adev, &amdgpu_debugfs_mes_event_log_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 072c478665ad..63f281a9984d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; mes_set_hw_res_pkt.oversubscription_timer = 50; - mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; - mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; + if (amdgpu_mes_log_enable) { + mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = + mes->event_log_gpu_addr; + } return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), -- cgit v1.2.3-70-g09d2 From 8966c3167402576d8ae4ab3914052e5e412e659b Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Fri, 22 Mar 2024 12:44:55 -0400 Subject: drm/amdgpu : Increase the mes log buffer size as per new MES FW version From MES version 0x54, the log entry increased and require the log buffer size to be increased. 
The 16k is maximum size agreed Signed-off-by: shaoyunl Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 1 + 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 281efcd3ba8e..7d215f280aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -103,7 +103,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) if (!amdgpu_mes_log_enable) return 0; - r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, @@ -1548,12 +1548,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, - mem, PAGE_SIZE, false); + mem, AMDGPU_MES_LOG_BUFFER_SIZE, false); return 0; } - DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 7d4f93fea937..4c8fc3117ef8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level { #define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */ #define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */ +#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximu log buffer size for MES */ struct amdgpu_mes_funcs; -- cgit v1.2.3-70-g09d2 From d045f4ad7700c271fa1278b78ef7722f833a8068 Mon Sep 17 00:00:00 2001 From: lima1002 Date: Mon, 29 Jan 2024 20:17:54 +0800 Subject: drm/amd/swsmu: Update smu v14.0.0 headers to be 14.0.1 compatible update ppsmc.h pmfw.h and driver_if.h for smu v14_0_1 Reviewed-by: Alex Deucher Signed-off-by: lima1002 Signed-off-by: Alex Deucher --- .../pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 33 +- .../amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h | 55 +++- .../amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h | 18 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 1 + drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 2 +- .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 347 +++++++++++++++++++-- 6 files changed, 413 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 5bb7a63c0602..97522c085258 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -144,6 +144,37 @@ typedef struct { uint32_t MaxGfxClk; } DpmClocks_t; +//Freq in MHz +//Voltage in milli volts with 2 fractional bits +typedef struct { + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks0[NUM_VCN_DPM_LEVELS]; + uint32_t VClocks1[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks0[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks1[NUM_VCN_DPM_LEVELS]; + uint32_t VPEClocks[NUM_VPE_DPM_LEVELS]; + uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS]; + uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + 
MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS]; + + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0 + uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1 + uint8_t VpeClkLevelsEnabled; + uint8_t NumMemPstatesEnabled; + uint8_t NumFclkLevelsEnabled; + uint8_t spare; + + uint32_t MinGfxClk; + uint32_t MaxGfxClk; +} DpmClocks_t_v14_0_1; + typedef struct { uint16_t CoreFrequency[16]; //Target core frequency [MHz] uint16_t CorePower[16]; //CAC calculated core power [mW] @@ -224,7 +255,7 @@ typedef enum { #define TABLE_CUSTOM_DPM 2 // Called by Driver #define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS #define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS -#define TABLE_SPARE0 5 // Unused +#define TABLE_MOMENTARY_PM 5 // Called by Tools #define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log #define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF #define TABLE_COUNT 8 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h index 356e0f57a426..ddb625860083 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h @@ -42,7 +42,7 @@ #define FEATURE_EDC_BIT 7 #define FEATURE_PLL_POWER_DOWN_BIT 8 #define FEATURE_VDDOFF_BIT 9 -#define FEATURE_VCN_DPM_BIT 10 +#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */ #define FEATURE_DS_MPM_BIT 11 #define FEATURE_FCLK_DPM_BIT 12 #define FEATURE_SOCCLK_DPM_BIT 13 @@ -56,9 +56,9 @@ #define FEATURE_DS_GFXCLK_BIT 21 #define FEATURE_DS_SOCCLK_BIT 22 #define FEATURE_DS_LCLK_BIT 23 -#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks +#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 #define FEATURE_DS_SHUBCLK_BIT 25 -#define FEATURE_SPARE0_BIT 26 //SPARE +#define FEATURE_RESERVED0_BIT 26 #define FEATURE_ZSTATES_BIT 27 #define FEATURE_IOMMUL2_PG_BIT 28 #define FEATURE_DS_FCLK_BIT 29 @@ -66,8 +66,8 @@ #define FEATURE_DS_MP1CLK_BIT 31 #define FEATURE_WHISPER_MODE_BIT 32 #define FEATURE_SMU_LOW_POWER_BIT 33 -#define FEATURE_SMART_L3_RINSER_BIT 34 -#define FEATURE_SPARE1_BIT 35 //SPARE +#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */ +#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */ #define FEATURE_PSI_BIT 36 #define FEATURE_PROCHOT_BIT 37 #define FEATURE_CPUOFF_BIT 38 @@ -77,11 +77,11 @@ #define FEATURE_PERF_LIMIT_BIT 42 #define FEATURE_CORE_DLDO_BIT 43 #define FEATURE_DVO_BIT 44 -#define FEATURE_DS_VCN_BIT 45 +#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */ #define FEATURE_CPPC_BIT 46 #define FEATURE_CPPC_PREFERRED_CORES 47 #define FEATURE_DF_CSTATES_BIT 48 -#define FEATURE_SPARE2_BIT 49 //SPARE +#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */ #define FEATURE_ATHUB_PG_BIT 50 #define FEATURE_VDDOFF_ECO_BIT 51 #define FEATURE_ZSTATES_ECO_BIT 52 @@ -93,8 +93,8 @@ #define FEATURE_DS_IPUCLK_BIT 58 #define FEATURE_DS_VPECLK_BIT 59 #define FEATURE_VPE_DPM_BIT 60 -#define FEATURE_SPARE_61 61 -#define FEATURE_FP_DIDT 62 +#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE*/ +#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */ #define NUM_FEATURES 63 // Firmware Header/Footer @@ -151,6 +151,43 @@ typedef struct { // MP1_EXT_SCRATCH7 = RTOS Current Job } FwStatus_t; +typedef struct { + // MP1_EXT_SCRATCH0 + 
uint32_t DpmHandlerID : 8; + uint32_t ActivityMonitorID : 8; + uint32_t DpmTimerID : 8; + uint32_t DpmHubID : 4; + uint32_t DpmHubTask : 4; + // MP1_EXT_SCRATCH1 + uint32_t CclkSyncStatus : 8; + uint32_t ZstateStatus : 4; + uint32_t Cpu1VddOff : 4; + uint32_t DstateFun : 4; + uint32_t DstateDev : 4; + uint32_t GfxOffStatus : 2; + uint32_t Cpu0Off : 2; + uint32_t Cpu1Off : 2; + uint32_t Cpu0VddOff : 2; + // MP1_EXT_SCRATCH2 + uint32_t P2JobHandler :32; + // MP1_EXT_SCRATCH3 + uint32_t PostCode :32; + // MP1_EXT_SCRATCH4 + uint32_t MsgPortBusy :15; + uint32_t RsmuPmiP1Pending : 1; + uint32_t RsmuPmiP2PendingCnt : 8; + uint32_t DfCstateExitPending : 1; + uint32_t Pc6EntryPending : 1; + uint32_t Pc6ExitPending : 1; + uint32_t WarmResetPending : 1; + uint32_t Mp0ClkPending : 1; + uint32_t InWhisperMode : 1; + uint32_t spare2 : 2; + // MP1_EXT_SCRATCH5 + uint32_t IdleMask :32; + // MP1_EXT_SCRATCH6 = RTOS threads' status + // MP1_EXT_SCRATCH7 = RTOS Current Job +} FwStatus_t_v14_0_1; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h index ca7ce4251482..c4dc5881d8df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h @@ -72,23 +72,19 @@ #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0) - #define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU - -#define PPSMC_MSG_spare_0x17 0x17 -#define PPSMC_MSG_spare_0x18 0x18 +#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency +#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency #define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry #define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry #define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK #define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK - #define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK #define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK #define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0) -#define PPSMC_MSG_spare_0x20 0x20 +#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage #define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0 #define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default - #define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK #define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. 
no Miracast activity @@ -99,8 +95,8 @@ #define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM #define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK #define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK -#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler -#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler +#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler +#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler #define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis #define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn #define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE @@ -110,7 +106,9 @@ #define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA #define PPSMC_MSG_SetSoftMaxVpe 0x36 ///< #define PPSMC_MSG_SetSoftMinVpe 0x37 ///< -#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages +#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache +#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache +#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages /** @}*/ /** diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 3f7463c1c1a9..4af1985ae446 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -27,6 +27,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 #define FEATURE_MASK(feature) (1ULL << feature) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 9e39f99154f9..07a65e005785 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -234,7 +234,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index ff6e464e40a6..e4419e1561ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -161,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu) SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), + SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -171,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu) goto err0_out; smu_table->metrics_time = 0; - smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); + smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL); if (!smu_table->clocks_table) goto err1_out; @@ -599,6 +599,60 
@@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu) return ret; } +static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t dpm_level, + uint32_t *freq) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + + if (!clk_table || clk_type >= SMU_CLK_COUNT) + return -EINVAL; + + switch (clk_type) { + case SMU_SOCCLK: + if (dpm_level >= clk_table->NumSocClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->SocClocks[dpm_level]; + break; + case SMU_VCLK: + if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->VClocks0[dpm_level]; + break; + case SMU_DCLK: + if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->DClocks0[dpm_level]; + break; + case SMU_VCLK1: + if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->VClocks1[dpm_level]; + break; + case SMU_DCLK1: + if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->DClocks1[dpm_level]; + break; + case SMU_UCLK: + case SMU_MCLK: + if (dpm_level >= clk_table->NumMemPstatesEnabled) + return -EINVAL; + *freq = clk_table->MemPstateTable[dpm_level].MemClk; + break; + case SMU_FCLK: + if (dpm_level >= clk_table->NumFclkLevelsEnabled) + return -EINVAL; + *freq = clk_table->FclkClocks_Freq[dpm_level]; + break; + default: + return -EINVAL; + } + + return 0; +} + static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t dpm_level, @@ -643,6 +697,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu, return 0; } +static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t dpm_level, + uint32_t *freq) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); + + return 0; +} + static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) { @@ -663,6 +730,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, break; case SMU_VCLK: case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: feature_id = SMU_FEATURE_VCN_DPM_BIT; break; default: @@ -672,6 +741,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, return smu_cmn_feature_is_enabled(smu, feature_id); } +static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + uint32_t clock_limit; + uint32_t max_dpm_level, min_dpm_level; + int ret = 0; + + if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + clock_limit = smu->smu_table.boot_values.uclk; + break; + case SMU_FCLK: + clock_limit = smu->smu_table.boot_values.fclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + clock_limit = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + clock_limit = smu->smu_table.boot_values.socclk; + break; + case SMU_VCLK: + case SMU_VCLK1: + clock_limit = smu->smu_table.boot_values.vclk; + break; + case SMU_DCLK: + case SMU_DCLK1: + clock_limit = smu->smu_table.boot_values.dclk; + break; + default: + clock_limit = 0; + break; + } + + /* clock in Mhz unit */ + if (min) + *min = 
clock_limit / 100; + if (max) + *max = clock_limit / 100; + + return 0; + } + + if (max) { + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + *max = clk_table->MaxGfxClk; + break; + case SMU_MCLK: + case SMU_UCLK: + case SMU_FCLK: + max_dpm_level = 0; + break; + case SMU_SOCCLK: + max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; + break; + case SMU_VCLK: + case SMU_DCLK: + max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1; + break; + case SMU_VCLK1: + case SMU_DCLK1: + max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1; + break; + default: + ret = -EINVAL; + goto failed; + } + + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); + if (ret) + goto failed; + } + } + + if (min) { + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + *min = clk_table->MinGfxClk; + break; + case SMU_MCLK: + case SMU_UCLK: + min_dpm_level = clk_table->NumMemPstatesEnabled - 1; + break; + case SMU_FCLK: + min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; + case SMU_SOCCLK: + min_dpm_level = 0; + break; + case SMU_VCLK: + case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: + min_dpm_level = 0; + break; + default: + ret = -EINVAL; + goto failed; + } + + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); + if (ret) + goto failed; + } + } + +failed: + return ret; +} + static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, @@ -742,7 +931,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, } if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); if (ret) goto failed; } @@ -774,7 +963,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, } if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); if (ret) goto failed; } @@ -784,6 +973,19 @@ failed: return ret; } +static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max); + + return 0; +} + static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) @@ -817,6 +1019,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value); } +static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *count) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + + switch (clk_type) { + case SMU_SOCCLK: + *count = clk_table->NumSocClkLevelsEnabled; + break; + case SMU_VCLK: + case SMU_DCLK: + *count = clk_table->Vcn0ClkLevelsEnabled; + break; + case SMU_VCLK1: + case SMU_DCLK1: + *count = clk_table->Vcn1ClkLevelsEnabled; + break; + case SMU_MCLK: + *count = clk_table->NumMemPstatesEnabled; + break; + case SMU_FCLK: + *count = 
clk_table->NumFclkLevelsEnabled; + break; + default: + break; + } + + return 0; +} + static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *count) @@ -846,6 +1079,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, return 0; } +static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *count) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_get_dpm_level_count(smu, clk_type, count); + + return 0; +} + static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { @@ -872,18 +1117,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, case SMU_SOCCLK: case SMU_VCLK: case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: case SMU_MCLK: case SMU_FCLK: ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value); if (ret) break; - ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count); + ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count); if (ret) break; for (i = 0; i < count; i++) { - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) break; @@ -946,8 +1193,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, break; case SMU_VCLK: case SMU_DCLK: - msg_set_min = SMU_MSG_SetHardMinVcn; - msg_set_max = SMU_MSG_SetSoftMaxVcn; + msg_set_min = SMU_MSG_SetHardMinVcn0; + msg_set_max = SMU_MSG_SetSoftMaxVcn0; + break; + case SMU_VCLK1: + case SMU_DCLK1: + msg_set_min = SMU_MSG_SetHardMinVcn1; + msg_set_max = SMU_MSG_SetSoftMaxVcn1; break; default: return -EINVAL; @@ -977,11 +1229,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, case SMU_FCLK: case SMU_VCLK: case SMU_DCLK: - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) break; - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); if (ret) break; @@ -1006,25 +1258,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); sclk_min = sclk_max; fclk_min = fclk_max; socclk_min = socclk_max; break; case AMD_DPM_FORCED_LEVEL_LOW: - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); sclk_max = sclk_min; fclk_max 
= fclk_min; socclk_max = socclk_min; break; case AMD_DPM_FORCED_LEVEL_AUTO: - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: @@ -1073,6 +1325,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, return ret; } +static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + + smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; + smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; + smu->gfx_actual_hard_min_freq = 0; + smu->gfx_actual_soft_max_freq = 0; + + return 0; +} + static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) { DpmClocks_t *clk_table = smu->smu_table.clocks_table; @@ -1085,6 +1349,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm return 0; } +static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu); + + return 0; +} + static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, bool enable) { @@ -1101,6 +1375,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, 0, NULL); } +static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + uint8_t idx; + + /* Only the Clock information of SOC and VPE is copied to provide VPE DPM settings for use. */ + for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) { + clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0; + clock_table->SocClocks[idx].Vol = 0; + } + + for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) { + clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? 
clk_table->VPEClocks[idx]:0; + clock_table->VPEClocks[idx].Vol = 0; + } + + return 0; +} + static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) { DpmClocks_t *clk_table = smu->smu_table.clocks_table; @@ -1120,6 +1413,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks * return 0; } +static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_14_0_0_get_dpm_table(smu, clock_table); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_14_0_1_get_dpm_table(smu, clock_table); + + return 0; +} + static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .check_fw_version = smu_v14_0_check_fw_version, @@ -1141,16 +1444,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_driver_table_location = smu_v14_0_set_driver_table_location, .gfx_off_control = smu_v14_0_gfx_off_control, .mode2_reset = smu_v14_0_0_mode2_reset, - .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq, .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, .set_performance_level = smu_v14_0_0_set_performance_level, - .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters, + .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, - .get_dpm_clock_table = smu_14_0_0_get_dpm_table, + .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, }; static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) -- cgit v1.2.3-70-g09d2 From f5a3507c4abf662cf108e3963565d2855aac88ce Mon Sep 17 00:00:00 2001 From: Yifan Zhang Date: Tue, 12 Dec 2023 17:17:05 +0800 Subject: drm/amdgpu: add smu 14.0.1 discovery support This patch to add smu 14.0.1 support Reviewed-by: Alex Deucher Signed-off-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 362da1413e1e..07c5fca06178 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1905,6 +1905,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); break; default: -- cgit v1.2.3-70-g09d2 From fec85f995a4ba43d1a1359934c7fe577389c300d Mon Sep 17 00:00:00 2001 From: Mounika Adhuri Date: Fri, 15 Mar 2024 21:36:10 +0530 Subject: drm/amd/display: Fix compiler redefinition warnings for certain configs [why & how] Modified definitions of 1 function and 2 structs to remove warnings on certain specific compiler configurations due to redefinition. 
Reviewed-by: Martin Leung Acked-by: Roman Li Signed-off-by: Mounika Adhuri Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 4 ++-- drivers/gpu/drm/amd/display/include/grph_object_id.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 35c631c22934..17b404cb1155 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -249,7 +249,7 @@ static bool dce110_enable_display_power_gating( return false; } -static void build_prescale_params(struct ipp_prescale_params *prescale_params, +static void dce110_prescale_params(struct ipp_prescale_params *prescale_params, const struct dc_plane_state *plane_state) { prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED; @@ -291,7 +291,7 @@ dce110_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, tf = &plane_state->in_transfer_func; - build_prescale_params(&prescale_params, plane_state); + dce110_prescale_params(&prescale_params, plane_state); ipp->funcs->ipp_program_prescale(ipp, &prescale_params); if (!plane_state->gamma_correction.is_identity && diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index c6bbd262f1ac..08ee0350b31f 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -226,8 +226,8 @@ enum dp_alt_mode { struct graphics_object_id { uint32_t id:8; - uint32_t enum_id:4; - uint32_t type:4; + enum object_enum_id enum_id; + enum object_type type; uint32_t reserved:16; /* for padding. total size should be u32 */ }; -- cgit v1.2.3-70-g09d2 From 4df96ba66760345471a85ef7bb29e1cd4e956057 Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Mon, 11 Mar 2024 13:43:37 -0400 Subject: drm/amd/display: Add timing pixel encoding for mst mode validation [Why] Mode pbn is not calculated correctly because timing pixel encoding is not checked within convert_dc_color_depth_into_bpc. [How] Get mode kbps from dc_bandwidth_in_kbps_from_timing, then calculate pbn by kbps_to_peak_pbn. 
Reviewed-by: Wayne Lin Acked-by: Roman Li Signed-off-by: Hersen Wu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 35 ++++++++++++++++++---- 1 file changed, 30 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 941e96f100f4..ad3170b72a47 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -1601,7 +1601,7 @@ enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream) { - int bpp, pbn, branch_max_throughput_mps = 0; + int pbn, branch_max_throughput_mps = 0; struct dc_link_settings cur_link_settings; unsigned int end_to_end_bw_in_kbps = 0; unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; @@ -1651,11 +1651,36 @@ enum dc_status dm_dp_mst_is_port_support_mode( } } } else { - /* check if mode could be supported within full_pbn */ - bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; - pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4); - if (pbn > aconnector->mst_output_port->full_pbn) + /* Check if mode could be supported within max slot + * number of current mst link and full_pbn of mst links. + */ + int pbn_div, slot_num, max_slot_num; + enum dc_link_encoding_format link_encoding; + uint16_t fec_overhead_multiplier_x1000 = + get_fec_overhead_multiplier(stream->link); + uint32_t stream_kbps = + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(stream->link)); + + pbn = kbps_to_peak_pbn(stream_kbps, fec_overhead_multiplier_x1000); + pbn_div = dm_mst_get_pbn_divider(stream->link); + slot_num = DIV_ROUND_UP(pbn, pbn_div); + + link_encoding = dc_link_get_highest_encoding_format(stream->link); + if (link_encoding == DC_LINK_ENCODING_DP_8b_10b) + max_slot_num = 63; + else if (link_encoding == DC_LINK_ENCODING_DP_128b_132b) + max_slot_num = 64; + else { + DRM_DEBUG_DRIVER("Invalid link encoding format\n"); return DC_FAIL_BANDWIDTH_VALIDATE; + } + + if (slot_num > max_slot_num || + pbn > aconnector->mst_output_port->full_pbn) { + DRM_DEBUG_DRIVER("Mode can not be supported within mst links!"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } } /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ -- cgit v1.2.3-70-g09d2 From 7ae0caf348830d92661296ec46ec5b4cfeb440b2 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Fri, 15 Mar 2024 16:19:21 -0400 Subject: drm/amd/display: fix underflow in some two display subvp/non-subvp configs [Why] In two display configuration, switching between subvp and non-subvp may cause underflow because it moves an existing pipe between displays [How] Create helper function for applying pipe split flags Apply pipe split flags prior to deciding on subvp During subvp check, do not merge pipes, so it can retain previous pipe configuration Add check for prev odm pipe in subvp check For single display subvp case, use same odm policy for phantom pipes as main subvp pipe Reviewed-by: Alvin Lee Acked-by: Roman Li Signed-off-by: Samson Tam Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 259 +++++++++++---------- .../amd/display/dc/resource/dcn32/dcn32_resource.c | 70 +++++- 2 files changed, 199 insertions(+), 130 deletions(-) (limited to 'drivers/gpu/drm/amd') 
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index a0a65e099104..8c0e1ab29aa9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -180,6 +180,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = { .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, }; +static bool dcn32_apply_merge_split_flags_helper(struct dc *dc, struct dc_state *context, + bool *repopulate_pipes, int *split, bool *merge); + void dcn32_build_wm_range_table_fpu(struct clk_mgr_internal *clk_mgr) { /* defaults */ @@ -622,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc, * to combine this with SubVP can cause issues with the scheduling). * - Not TMZ surface */ - if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && + if (pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe && !dcn32_is_center_timing(pipe) && !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) && (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE && @@ -1425,13 +1428,14 @@ static bool is_test_pattern_enabled( return false; } -static void dcn32_full_validate_bw_helper(struct dc *dc, +static bool dcn32_full_validate_bw_helper(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *vlevel, int *split, bool *merge, - int *pipe_cnt) + int *pipe_cnt, + bool *repopulate_pipes) { struct vba_vars_st *vba = &context->bw_ctx.dml.vba; unsigned int dc_pipe_idx = 0; @@ -1461,6 +1465,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, vba->VoltageLevel = *vlevel; } + /* Apply split and merge flags before checking for subvp */ + if (!dcn32_apply_merge_split_flags_helper(dc, context, repopulate_pipes, split, merge)) + return false; + memset(split, 0, MAX_PIPES * sizeof(int)); + memset(merge, 0, MAX_PIPES * sizeof(bool)); + /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP * 2. Full update (i.e. !fast_validate) @@ -1475,19 +1485,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || dc->debug.force_subvp_mclk_switch)) { - dcn32_merge_pipes_for_subvp(dc, context); - memset(merge, 0, MAX_PIPES * sizeof(bool)); - vlevel_temp = *vlevel; - /* to re-initialize viewport after the pipe merge */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - - if (!pipe_ctx->plane_state || !pipe_ctx->stream) - continue; - - resource_build_scaling_params(pipe_ctx); - } while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) && dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) { @@ -1576,8 +1574,6 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, * add phantom pipes. If pipe split (ODM / MPC) is required, both the main * and phantom pipes will be split in the regular pipe splitting sequence. */ - memset(split, 0, MAX_PIPES * sizeof(int)); - memset(merge, 0, MAX_PIPES * sizeof(bool)); *vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); vba->VoltageLevel = *vlevel; // Note: We can't apply the phantom pipes to hardware at this time. 
We have to wait @@ -1590,6 +1586,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc, try_odm_power_optimization_and_revalidate( dc, context, pipes, split, merge, vlevel, *pipe_cnt); + return true; } static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) @@ -1929,106 +1926,23 @@ static bool dcn32_split_stream_for_mpc_or_odm( return true; } -bool dcn32_internal_validate_bw(struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *vlevel_out, - bool fast_validate) +static bool dcn32_apply_merge_split_flags_helper( + struct dc *dc, + struct dc_state *context, + bool *repopulate_pipes, + int *split, + bool *merge) { - bool out = false; - bool repopulate_pipes = false; - int split[MAX_PIPES] = { 0 }; - bool merge[MAX_PIPES] = { false }; + int i, pipe_idx; bool newly_split[MAX_PIPES] = { false }; - int pipe_cnt, i, pipe_idx; - int vlevel = context->bw_ctx.dml.soc.num_states; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; - dc_assert_fp_enabled(); - - ASSERT(pipes); - if (!pipes) - return false; - - // For each full update, remove all existing phantom pipes first - dc_state_remove_phantom_streams_and_planes(dc, context); - dc_state_release_phantom_streams_and_planes(dc, context); - - dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); - - if (!fast_validate) - dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); - - if (fast_validate || - (dc->debug.dml_disallow_alternate_prefetch_modes && - (vlevel == context->bw_ctx.dml.soc.num_states || - vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { - /* - * If dml_disallow_alternate_prefetch_modes is false, then we have already - * tried alternate prefetch modes during full validation. - * - * If mode is unsupported or there is no p-state support, then - * fall back to favouring voltage. 
- * - * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try - * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) - */ - context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = - dm_prefetch_support_none; - - context->bw_ctx.dml.validate_max_state = fast_validate; - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - context->bw_ctx.dml.validate_max_state = false; - - if (vlevel < context->bw_ctx.dml.soc.num_states) { - memset(split, 0, sizeof(split)); - memset(merge, 0, sizeof(merge)); - vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); - // dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML - vba->VoltageLevel = vlevel; - } - } - - dml_log_mode_support_params(&context->bw_ctx.dml); - - if (vlevel == context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; - - if (!pipe->stream) - continue; - - if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled - && !dc->config.enable_windowed_mpo_odm - && pipe->plane_state && mpo_pipe - && memcmp(&mpo_pipe->plane_state->clip_rect, - &pipe->stream->src, - sizeof(struct rect)) != 0) { - ASSERT(mpo_pipe->plane_state != pipe->plane_state); - goto validate_fail; - } - pipe_idx++; - } - if (dc->config.enable_windowed_mpo_odm) { - repopulate_pipes = update_pipes_with_split_flags( - dc, context, vba, split, merge); + if (update_pipes_with_split_flags( + dc, context, vba, split, merge)) + *repopulate_pipes = true; } else { + /* the code below will be removed once windowed mpo odm is fully * enabled. 
*/ @@ -2085,7 +1999,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); memset(&pipe->link_res, 0, sizeof(pipe->link_res)); - repopulate_pipes = true; + *repopulate_pipes = true; } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { struct pipe_ctx *top_pipe = pipe->top_pipe; struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; @@ -2101,7 +2015,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); memset(&pipe->link_res, 0, sizeof(pipe->link_res)); - repopulate_pipes = true; + *repopulate_pipes = true; } else ASSERT(0); /* Should never try to merge master pipe */ @@ -2140,15 +2054,15 @@ bool dcn32_internal_validate_bw(struct dc *dc, hsplit_pipe = dcn32_find_split_pipe(dc, context, old_index); ASSERT(hsplit_pipe); if (!hsplit_pipe) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, pipe, hsplit_pipe, odm)) - goto validate_fail; + return false; newly_split[hsplit_pipe->pipe_idx] = true; - repopulate_pipes = true; + *repopulate_pipes = true; } if (split[i] == 4) { struct pipe_ctx *pipe_4to1; @@ -2163,11 +2077,11 @@ bool dcn32_internal_validate_bw(struct dc *dc, pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); ASSERT(pipe_4to1); if (!pipe_4to1) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, pipe, pipe_4to1, odm)) - goto validate_fail; + return false; newly_split[pipe_4to1->pipe_idx] = true; if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe @@ -2182,11 +2096,11 @@ bool dcn32_internal_validate_bw(struct dc *dc, pipe_4to1 = dcn32_find_split_pipe(dc, context, old_index); ASSERT(pipe_4to1); if (!pipe_4to1) - goto validate_fail; + return false; if (!dcn32_split_stream_for_mpc_or_odm( dc, &context->res_ctx, hsplit_pipe, pipe_4to1, odm)) - goto validate_fail; + return false; newly_split[pipe_4to1->pipe_idx] = true; } if (odm) @@ -2198,10 +2112,113 @@ bool dcn32_internal_validate_bw(struct dc *dc, if (pipe->plane_state) { if (!resource_build_scaling_params(pipe)) - goto validate_fail; + return false; } } } + return true; +} + +bool dcn32_internal_validate_bw(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + bool repopulate_pipes = false; + int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx; + int vlevel = context->bw_ctx.dml.soc.num_states; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + dc_assert_fp_enabled(); + + ASSERT(pipes); + if (!pipes) + return false; + + /* For each full update, remove all existing phantom pipes first */ + dc_state_remove_phantom_streams_and_planes(dc, context); + dc_state_release_phantom_streams_and_planes(dc, context); + + dc->res_pool->funcs->update_soc_for_wm_a(dc, context); + + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); + context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); + + if (!fast_validate) { + if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, + &pipe_cnt, &repopulate_pipes)) + goto 
validate_fail; + } + + if (fast_validate || + (dc->debug.dml_disallow_alternate_prefetch_modes && + (vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { + /* + * If dml_disallow_alternate_prefetch_modes is false, then we have already + * tried alternate prefetch modes during full validation. + * + * If mode is unsupported or there is no p-state support, then + * fall back to favouring voltage. + * + * If Prefetch mode 0 failed for this config, or passed with Max UCLK, then try + * to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2) + */ + context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = + dm_prefetch_support_none; + + context->bw_ctx.dml.validate_max_state = fast_validate; + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + context->bw_ctx.dml.validate_max_state = false; + + if (vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + /* dcn20_validate_apply_pipe_split_flags can modify voltage level outside of DML */ + vba->VoltageLevel = vlevel; + } + } + + dml_log_mode_support_params(&context->bw_ctx.dml); + + if (vlevel == context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + + if (!pipe->stream) + continue; + + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && !dc->config.enable_windowed_mpo_odm + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_state->clip_rect, + &pipe->stream->src, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + + if (!dcn32_apply_merge_split_flags_helper(dc, context, &repopulate_pipes, split, merge)) + goto validate_fail; /* Actual dsc count per stream dsc validation*/ if (!dcn20_validate_dsc(dc, context)) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index da15ad845147..0c8dd71148b4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1815,9 +1815,47 @@ int dcn32_populate_dml_pipes_from_context( struct pipe_ctx *pipe = NULL; bool subvp_in_use = false; struct dc_crtc_timing *timing; + int subvp_main_pipe_index = -1; + enum mall_stream_type mall_type; + bool single_display_subvp = false; + struct dc_stream_state *stream = NULL; + int num_subvp_main = 0; + int num_subvp_phantom = 0; + int num_subvp_none = 0; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + /* For single display subvp, look for subvp main so if we have phantom + * pipe, we can set odm policy to match main pipe + */ + for (i = 0; i < context->stream_count; i++) { + stream = context->streams[i]; + mall_type = dc_state_get_stream_subvp_type(context, stream); + if (mall_type == SUBVP_MAIN) + num_subvp_main++; + else if (mall_type == SUBVP_PHANTOM) + num_subvp_phantom++; + else + num_subvp_none++; + } + if (num_subvp_main == 1 && num_subvp_phantom == 1 && num_subvp_none == 0) + single_display_subvp = true; + + if (single_display_subvp) { + for (i = 0, pipe_cnt = 0; i < 
dc->res_pool->pipe_count; i++) { + pipe = &res_ctx->pipe_ctx[i]; + if (!res_ctx->pipe_ctx[i].stream) + continue; + + mall_type = dc_state_get_pipe_subvp_type(context, pipe); + if (mall_type == SUBVP_MAIN) { + if (resource_is_pipe_type(pipe, OTG_MASTER)) + subvp_main_pipe_index = pipe_cnt; + } + pipe_cnt++; + } + } + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!res_ctx->pipe_ctx[i].stream) @@ -1832,19 +1870,33 @@ int dcn32_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; if (dc->config.enable_windowed_mpo_odm && dc->debug.enable_single_display_2to1_odm_policy) { - switch (resource_get_odm_slice_count(pipe)) { - case 2: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; - break; - case 4: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; - break; - default: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + /* For single display subvp, if pipe is phantom pipe, + * then copy odm policy from subvp main pipe + */ + mall_type = dc_state_get_pipe_subvp_type(context, pipe); + if (single_display_subvp && (mall_type == SUBVP_PHANTOM)) { + if (subvp_main_pipe_index < 0) { + ASSERT(0); + } else { + pipes[pipe_cnt].pipe.dest.odm_combine_policy = + pipes[subvp_main_pipe_index].pipe.dest.odm_combine_policy; + } + } else { + switch (resource_get_odm_slice_count(pipe)) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + break; + case 4: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + } } } else { pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; } + pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256; // according to spreadsheet pipes[pipe_cnt].pipe.src.unbounded_req_mode = false; pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_19; -- cgit v1.2.3-70-g09d2 From 62297b71a02d73e761470f7907ec1ce63bb3615c Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Fri, 15 Mar 2024 12:03:12 -0400 Subject: drm/amd/display: optimize dml2 pipe resource allocation order [why] There could be cases where we are transitioning from MPC to ODM combine. In this case, if we map pipes before unmapping MPC pipes, we might temporarily run out of pipes. The change reorders pipe resource allocation so that we unmap pipes before mapping new pipes. 
Reviewed-by: Dillon Varone Acked-by: Roman Li Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 + .../amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 126 ++++++++++++++------- .../drm/amd/display/dc/dml2/dml2_internal_types.h | 11 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 2 + 4 files changed, 97 insertions(+), 44 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index dd0428024173..601af21b2df9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -5052,7 +5052,9 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; dml2_options->callbacks.get_mpc_slice_index = &resource_get_mpc_slice_index; + dml2_options->callbacks.get_mpc_slice_count = &resource_get_mpc_slice_count; dml2_options->callbacks.get_odm_slice_index = &resource_get_odm_slice_index; + dml2_options->callbacks.get_odm_slice_count = &resource_get_odm_slice_count; dml2_options->callbacks.get_opp_head = &resource_get_opp_head; dml2_options->callbacks.get_otg_master_for_stream = &resource_get_otg_master_for_stream; dml2_options->callbacks.get_opp_heads_for_otg_master = &resource_get_opp_heads_for_otg_master; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index b64e0160d482..27d9da8ad7c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -793,7 +793,7 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index); } -static unsigned int get_mpc_factor(struct dml2_context *ctx, +static unsigned int get_target_mpc_factor(struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, @@ -822,7 +822,7 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx, return mpc_factor; } -static unsigned int get_odm_factor( +static unsigned int get_target_odm_factor( const struct dml2_context *ctx, struct dc_state *state, const struct dml_display_cfg_st *disp_cfg, @@ -849,79 +849,117 @@ static unsigned int get_odm_factor( return 1; } +static unsigned int get_source_odm_factor(const struct dml2_context *ctx, + struct dc_state *state, + const struct dc_stream_state *stream) +{ + struct pipe_ctx *otg_master = ctx->config.callbacks.get_otg_master_for_stream(&state->res_ctx, stream); + + return ctx->config.callbacks.get_odm_slice_count(otg_master); +} + +static unsigned int get_source_mpc_factor(const struct dml2_context *ctx, + struct dc_state *state, + const struct dc_plane_state *plane) +{ + struct pipe_ctx *dpp_pipes[MAX_PIPES]; + int dpp_pipe_count = ctx->config.callbacks.get_dpp_pipes_for_plane(plane, + &state->res_ctx, dpp_pipes); + + ASSERT(dpp_pipe_count > 0); + return ctx->config.callbacks.get_mpc_slice_count(dpp_pipes[0]); +} + + static void populate_mpc_factors_for_stream( struct dml2_context *ctx, const struct dml_display_cfg_st 
*disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, struct dc_state *state, unsigned int stream_idx, - unsigned int odm_factor, - unsigned int mpc_factors[MAX_PIPES]) + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) { const struct dc_stream_status *status = &state->stream_status[stream_idx]; int i; - for (i = 0; i < status->plane_count; i++) - if (odm_factor == 1) - mpc_factors[i] = get_mpc_factor( - ctx, state, disp_cfg, mapping, status, - state->streams[stream_idx], i); - else - mpc_factors[i] = 1; + for (i = 0; i < status->plane_count; i++) { + mpc_factors[i].source = get_source_mpc_factor(ctx, state, status->plane_states[i]); + mpc_factors[i].target = (odm_factor.target == 1) ? + get_target_mpc_factor(ctx, state, disp_cfg, mapping, status, state->streams[stream_idx], i) : 1; + } } static void populate_odm_factors(const struct dml2_context *ctx, const struct dml_display_cfg_st *disp_cfg, struct dml2_dml_to_dc_pipe_mapping *mapping, struct dc_state *state, - unsigned int odm_factors[MAX_PIPES]) + struct dml2_pipe_combine_factor odm_factors[MAX_PIPES]) { int i; - for (i = 0; i < state->stream_count; i++) - odm_factors[i] = get_odm_factor( + for (i = 0; i < state->stream_count; i++) { + odm_factors[i].source = get_source_odm_factor(ctx, state, state->streams[i]); + odm_factors[i].target = get_target_odm_factor( ctx, state, disp_cfg, mapping, state->streams[i]); + } } -static bool map_dc_pipes_for_stream(struct dml2_context *ctx, +static bool unmap_dc_pipes_for_stream(struct dml2_context *ctx, struct dc_state *state, const struct dc_state *existing_state, const struct dc_stream_state *stream, const struct dc_stream_status *status, - unsigned int odm_factor, - unsigned int mpc_factors[MAX_PIPES]) + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) { int plane_idx; bool result = true; - if (odm_factor == 1) - /* - * ODM and MPC combines are by DML design mutually exclusive. - * ODM factor of 1 means MPC factors may be greater than 1. - * In this case, we want to set ODM factor to 1 first to free up - * pipe resources from previous ODM configuration before setting - * up MPC combine to acquire more pipe resources. 
- */ + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) + if (mpc_factors[plane_idx].target < mpc_factors[plane_idx].source) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx].target); + if (odm_factor.target < odm_factor.source) result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( state, existing_state, ctx->config.callbacks.dc->res_pool, stream, - odm_factor); + odm_factor.target); + return result; +} + +static bool map_dc_pipes_for_stream(struct dml2_context *ctx, + struct dc_state *state, + const struct dc_state *existing_state, + const struct dc_stream_state *stream, + const struct dc_stream_status *status, + struct dml2_pipe_combine_factor odm_factor, + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES]) +{ + int plane_idx; + bool result = true; + for (plane_idx = 0; plane_idx < status->plane_count; plane_idx++) - result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( - state, - existing_state, - ctx->config.callbacks.dc->res_pool, - status->plane_states[plane_idx], - mpc_factors[plane_idx]); - if (odm_factor > 1) + if (mpc_factors[plane_idx].target > mpc_factors[plane_idx].source) + result &= ctx->config.callbacks.update_pipes_for_plane_with_slice_count( + state, + existing_state, + ctx->config.callbacks.dc->res_pool, + status->plane_states[plane_idx], + mpc_factors[plane_idx].target); + if (odm_factor.target > odm_factor.source) result &= ctx->config.callbacks.update_pipes_for_stream_with_slice_count( state, existing_state, ctx->config.callbacks.dc->res_pool, stream, - odm_factor); + odm_factor.target); return result; } @@ -931,20 +969,20 @@ static bool map_dc_pipes_with_callbacks(struct dml2_context *ctx, struct dml2_dml_to_dc_pipe_mapping *mapping, const struct dc_state *existing_state) { - unsigned int odm_factors[MAX_PIPES]; - unsigned int mpc_factors_for_stream[MAX_PIPES]; int i; bool result = true; - populate_odm_factors(ctx, disp_cfg, mapping, state, odm_factors); - for (i = 0; i < state->stream_count; i++) { + populate_odm_factors(ctx, disp_cfg, mapping, state, ctx->pipe_combine_scratch.odm_factors); + for (i = 0; i < state->stream_count; i++) populate_mpc_factors_for_stream(ctx, disp_cfg, mapping, state, - i, odm_factors[i], mpc_factors_for_stream); - result &= map_dc_pipes_for_stream(ctx, state, existing_state, - state->streams[i], - &state->stream_status[i], - odm_factors[i], mpc_factors_for_stream); - } + i, ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + for (i = 0; i < state->stream_count; i++) + result &= unmap_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i], + &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + for (i = 0; i < state->stream_count; i++) + result &= map_dc_pipes_for_stream(ctx, state, existing_state, state->streams[i], + &state->stream_status[i], ctx->pipe_combine_scratch.odm_factors[i], ctx->pipe_combine_scratch.mpc_factors[i]); + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h index 1cf8a884c0fb..9dab4e43c511 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h @@ -109,10 +109,21 @@ enum dml2_architecture { dml2_architecture_20, }; +struct 
dml2_pipe_combine_factor { + unsigned int source; + unsigned int target; +}; + +struct dml2_pipe_combine_scratch { + struct dml2_pipe_combine_factor odm_factors[MAX_PIPES]; + struct dml2_pipe_combine_factor mpc_factors[MAX_PIPES][MAX_PIPES]; +}; + struct dml2_context { enum dml2_architecture architecture; struct dml2_configuration_options config; struct dml2_helper_det_policy_scratch det_helper_scratch; + struct dml2_pipe_combine_scratch pipe_combine_scratch; union { struct { struct display_mode_lib_st dml_core_ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 6e97337863e0..54aff9beb73a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -86,7 +86,9 @@ struct dml2_dc_callbacks { const struct dc_plane_state *plane, int slice_count); int (*get_odm_slice_index)(const struct pipe_ctx *opp_head); + int (*get_odm_slice_count)(const struct pipe_ctx *opp_head); int (*get_mpc_slice_index)(const struct pipe_ctx *dpp_pipe); + int (*get_mpc_slice_count)(const struct pipe_ctx *dpp_pipe); struct pipe_ctx *(*get_opp_head)(const struct pipe_ctx *pipe_ctx); struct pipe_ctx *(*get_otg_master_for_stream)( struct resource_context *res_ctx, -- cgit v1.2.3-70-g09d2 From c9c703952600845a3a59ef9670e5b2d037457c81 Mon Sep 17 00:00:00 2001 From: Daniel Miess Date: Fri, 3 Nov 2023 13:52:53 -0400 Subject: drm/amd/display: Toggle additional RCO options in DCN35 [Why] With root clock optimization now enabled for DCN35 there are still RCO registers still not being toggled [How] Add in logic to toggle RCO registers for DPPCLK, DPSTREAMCLK and DSCCLK Reviewed-by: Charlene Liu Acked-by: Roman Li Signed-off-by: Daniel Miess Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 207 ++++++++++++++++++++- .../gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h | 3 +- .../gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c | 69 ++++++- .../gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h | 9 +- .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 28 +++ .../drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 2 + .../gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c | 1 + .../drm/amd/display/dc/hwss/hw_sequencer_private.h | 4 + drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 4 + 10 files changed, 314 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 6300ae2ea1f7..29fd8daa9d15 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -695,6 +695,7 @@ enum pg_hw_pipe_resources { PG_MPCC, PG_OPP, PG_OPTC, + PG_DPSTREAM, PG_HW_PIPE_RESOURCES_NUM_ELEMENT }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c index f1ba7bb792ea..58dd3c5bbff0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c @@ -49,15 +49,23 @@ static void dcn35_set_dppclk_enable(struct dccg *dccg, switch (dpp_inst) { case 0: REG_UPDATE(DPPCLK_CTRL, DPPCLK0_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); break; case 1: REG_UPDATE(DPPCLK_CTRL, DPPCLK1_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, 
enable); break; case 2: REG_UPDATE(DPPCLK_CTRL, DPPCLK2_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); break; case 3: REG_UPDATE(DPPCLK_CTRL, DPPCLK3_EN, enable); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); break; default: break; @@ -100,6 +108,32 @@ static void dccg35_update_dpp_dto(struct dccg *dccg, int dpp_inst, dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk; } +static void dccg35_set_dppclk_root_clock_gating(struct dccg *dccg, + uint32_t dpp_inst, uint32_t enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + return; + + switch (dpp_inst) { + case 0: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, enable); + break; + case 1: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, enable); + break; + case 2: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, enable); + break; + case 3: + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, enable); + break; + default: + break; + } +} + static void dccg35_get_pixel_rate_div( struct dccg *dccg, uint32_t otg_inst, @@ -333,21 +367,67 @@ static void dccg35_set_dpstreamclk( /* enabled to select one of the DTBCLKs for pipe */ switch (dp_hpo_inst) { case 0: - REG_UPDATE_2(DPSTREAMCLK_CNTL, - DPSTREAMCLK0_EN, + REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK0_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 1: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 2: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); break; case 3: REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN, (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, (src == REFCLK) ? 0 : 1); + break; + default: + BREAK_TO_DEBUGGER(); + return; + } +} + + +static void dccg35_set_dpstreamclk_root_clock_gating( + struct dccg *dccg, + int dp_hpo_inst, + bool enable) +{ + struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + + switch (dp_hpo_inst) { + case 0: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 1: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 2: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, enable ? 
1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, enable ? 1 : 0); + } + break; + case 3: + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) { + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, enable ? 1 : 0); + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, enable ? 1 : 0); + } break; default: BREAK_TO_DEBUGGER(); @@ -355,6 +435,8 @@ static void dccg35_set_dpstreamclk( } } + + static void dccg35_set_physymclk_root_clock_gating( struct dccg *dccg, int phy_inst, @@ -369,22 +451,32 @@ static void dccg35_set_physymclk_root_clock_gating( case 0: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 1: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 2: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 3: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; case 4: REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, enable ? 1 : 0); break; default: BREAK_TO_DEBUGGER(); @@ -407,10 +499,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 1, PHYASYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_EN, 0, PHYASYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYA_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 1: @@ -418,10 +516,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 1, PHYBSYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_EN, 0, PHYBSYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYB_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 2: @@ -429,10 +533,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 1, PHYCSYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_EN, 0, PHYCSYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYC_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 3: @@ -440,10 +550,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_EN, 1, PHYDSYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, 
PHYDSYMCLK_EN, 0, PHYDSYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYD_REFCLK_ROOT_GATE_DISABLE, 1); } break; case 4: @@ -451,10 +567,16 @@ static void dccg35_set_physymclk( REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 1, PHYESYMCLK_SRC_SEL, clk_src); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, 0); } else { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_EN, 0, PHYESYMCLK_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL4, +// PHYE_REFCLK_ROOT_GATE_DISABLE, 1); } break; default: @@ -491,12 +613,12 @@ static void dccg35_dpp_root_clock_control( if (clock_on) { /* turn off the DTO and leave phase/modulo at max */ - dcn35_set_dppclk_enable(dccg, dpp_inst, 0); + dcn35_set_dppclk_enable(dccg, dpp_inst, 1); REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, 0xFF, DPPCLK0_DTO_MODULO, 0xFF); } else { - dcn35_set_dppclk_enable(dccg, dpp_inst, 1); + dcn35_set_dppclk_enable(dccg, dpp_inst, 0); /* turn on the DTO to generate a 0hz clock */ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, DPPCLK0_DTO_PHASE, 0, @@ -575,18 +697,32 @@ void dccg35_init(struct dccg *dccg) dccg35_disable_symclk32_se(dccg, otg_inst); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - for (otg_inst = 0; otg_inst < 2; otg_inst++) + for (otg_inst = 0; otg_inst < 2; otg_inst++) { dccg31_disable_symclk32_le(dccg, otg_inst); + dccg31_set_symclk32_le_root_clock_gating(dccg, otg_inst, false); + } + +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// for (otg_inst = 0; otg_inst < 4; otg_inst++) +// dccg35_disable_symclk_se(dccg, otg_inst, otg_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) - for (otg_inst = 0; otg_inst < 4; otg_inst++) - dccg314_set_dpstreamclk(dccg, REFCLK, otg_inst, + for (otg_inst = 0; otg_inst < 4; otg_inst++) { + dccg35_set_dpstreamclk(dccg, REFCLK, otg_inst, otg_inst); + dccg35_set_dpstreamclk_root_clock_gating(dccg, otg_inst, false); + } if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) for (otg_inst = 0; otg_inst < 5; otg_inst++) dccg35_set_physymclk_root_clock_gating(dccg, otg_inst, false); + + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) + for (otg_inst = 0; otg_inst < 4; otg_inst++) + dccg35_set_dppclk_root_clock_gating(dccg, otg_inst, 0); + /* dccg35_enable_global_fgcg_rep( dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits @@ -611,24 +747,32 @@ static void dccg35_enable_dscclk(struct dccg *dccg, int inst) DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK0_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 0); REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 0); 
REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 1); break; default: BREAK_TO_DEBUGGER(); @@ -650,24 +794,32 @@ static void dccg35_disable_dscclk(struct dccg *dccg, REG_UPDATE_2(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, 0, DSCCLK0_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK1_EN, 0); REG_UPDATE_2(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, 0, DSCCLK1_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK2_EN, 0); REG_UPDATE_2(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, 0, DSCCLK2_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE(DSCCLK_DTO_CTRL, DSCCLK3_EN, 0); REG_UPDATE_2(DSCCLK3_DTO_PARAM, DSCCLK3_DTO_PHASE, 0, DSCCLK3_DTO_MODULO, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.dsc) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, 0); break; default: return; @@ -682,22 +834,32 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, case 0: REG_UPDATE(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 1); break; case 4: REG_UPDATE(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, 1); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 1); break; } @@ -706,26 +868,36 @@ static void dccg35_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, 1, SYMCLKA_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 1); break; case 1: REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, 1, SYMCLKB_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 1); break; case 2: REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, 1, SYMCLKC_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 1); break; case 3: REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, 1, SYMCLKD_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1); 
break; case 4: REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, 1, SYMCLKE_FE_SRC_SEL, link_enc_inst); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1); break; } } @@ -786,26 +958,36 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst REG_UPDATE_2(SYMCLKA_CLOCK_ENABLE, SYMCLKA_FE_EN, 0, SYMCLKA_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE_2(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_EN, 0, SYMCLKB_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE_2(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_EN, 0, SYMCLKC_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE_2(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_EN, 0, SYMCLKD_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 0); break; case 4: REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, 0, SYMCLKE_FE_SRC_SEL, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 0); break; } @@ -818,22 +1000,32 @@ static void dccg35_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst case 0: REG_UPDATE(SYMCLKA_CLOCK_ENABLE, SYMCLKA_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, 0); break; case 1: REG_UPDATE(SYMCLKB_CLOCK_ENABLE, SYMCLKB_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, 0); break; case 2: REG_UPDATE(SYMCLKC_CLOCK_ENABLE, SYMCLKC_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, 0); break; case 3: REG_UPDATE(SYMCLKD_CLOCK_ENABLE, SYMCLKD_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, 0); break; case 4: REG_UPDATE(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, 0); +// if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) +// REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, 0); break; } } @@ -845,6 +1037,7 @@ static const struct dccg_funcs dccg35_funcs = { .get_dccg_ref_freq = dccg31_get_dccg_ref_freq, .dccg_init = dccg35_init, .set_dpstreamclk = dccg35_set_dpstreamclk, + .set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating, .enable_symclk32_se = dccg31_enable_symclk32_se, .disable_symclk32_se = dccg35_disable_symclk32_se, .enable_symclk32_le = dccg31_enable_symclk32_le, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h index a039eedc7c24..c48139bed11f 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h @@ -1090,7 +1090,8 @@ type DPP_CLOCK_ENABLE; \ type CM_HDR_MULT_COEF; \ type CUR0_FP_BIAS; \ - type CUR0_FP_SCALE; + type 
CUR0_FP_SCALE;\ + type DISPCLK_R_GATE_DISABLE; struct dcn_dpp_shift { TF_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c index 0146b36b93d7..e16274fee31d 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c @@ -37,14 +37,73 @@ ((const struct dcn35_dpp_shift *)(dpp->tf_shift))->field_name, \ ((const struct dcn35_dpp_mask *)(dpp->tf_mask))->field_name -bool dpp35_construct(struct dcn3_dpp *dpp, struct dc_context *ctx, - uint32_t inst, const struct dcn3_dpp_registers *tf_regs, - const struct dcn35_dpp_shift *tf_shift, - const struct dcn35_dpp_mask *tf_mask) +void dpp35_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable) { - return dpp32_construct(dpp, ctx, inst, tf_regs, + struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); + + if (enable) { + if (dpp->tf_mask->DPPCLK_RATE_CONTROL) + REG_UPDATE_2(DPP_CONTROL, + DPPCLK_RATE_CONTROL, dppclk_div, + DPP_CLOCK_ENABLE, 1); + else + REG_UPDATE_2(DPP_CONTROL, + DPP_CLOCK_ENABLE, 1, + DISPCLK_R_GATE_DISABLE, 1); + } else + REG_UPDATE_2(DPP_CONTROL, + DPP_CLOCK_ENABLE, 0, + DISPCLK_R_GATE_DISABLE, 0); +} + +static struct dpp_funcs dcn35_dpp_funcs = { + .dpp_program_gamcor_lut = dpp3_program_gamcor_lut, + .dpp_read_state = dpp30_read_state, + .dpp_reset = dpp_reset, + .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, + .dpp_get_optimal_number_of_taps = dpp3_get_optimal_number_of_taps, + .dpp_set_gamut_remap = dpp3_cm_set_gamut_remap, + .dpp_set_csc_adjustment = NULL, + .dpp_set_csc_default = NULL, + .dpp_program_regamma_pwl = NULL, + .dpp_set_pre_degam = dpp3_set_pre_degam, + .dpp_program_input_lut = NULL, + .dpp_full_bypass = dpp1_full_bypass, + .dpp_setup = dpp3_cnv_setup, + .dpp_program_degamma_pwl = NULL, + .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, + .dpp_program_cm_bias = dpp3_program_cm_bias, + + .dpp_program_blnd_lut = NULL, // BLNDGAM is removed completely in DCN3.2 DPP + .dpp_program_shaper_lut = NULL, // CM SHAPER block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + .dpp_program_3dlut = NULL, // CM 3DLUT block is removed in DCN3.2 DPP, (it is in MPCC, programmable before or after BLND) + + .dpp_program_bias_and_scale = NULL, + .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer, + .set_cursor_attributes = dpp3_set_cursor_attributes, + .set_cursor_position = dpp1_set_cursor_position, + .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes, + .dpp_dppclk_control = dpp35_dppclk_control, + .dpp_set_hdr_multiplier = dpp3_set_hdr_multiplier, + .dpp_get_gamut_remap = dpp3_cm_get_gamut_remap, +}; + + +bool dpp35_construct( + struct dcn3_dpp *dpp, struct dc_context *ctx, + uint32_t inst, const struct dcn3_dpp_registers *tf_regs, + const struct dcn35_dpp_shift *tf_shift, + const struct dcn35_dpp_mask *tf_mask) +{ + bool ret = dpp32_construct(dpp, ctx, inst, tf_regs, (const struct dcn3_dpp_shift *)(tf_shift), (const struct dcn3_dpp_mask *)(tf_mask)); + + dpp->base.funcs = &dcn35_dpp_funcs; + return ret; } void dpp35_set_fgcg(struct dcn3_dpp *dpp, bool enable) diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h index 09b84307cd9e..135872d88219 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h @@ -31,7 +31,9 @@ #define DPP_REG_LIST_SH_MASK_DCN35(mask_sh) \ 
DPP_REG_LIST_SH_MASK_DCN30_COMMON(mask_sh), \ - TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh) + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DPP_FGCG_REP_DIS, mask_sh), \ + TF_SF(DPP_TOP0_DPP_CONTROL, DISPCLK_R_GATE_DISABLE, mask_sh) #define DPP_REG_FIELD_LIST_DCN35(type) \ struct { \ @@ -47,6 +49,11 @@ struct dcn35_dpp_mask { DPP_REG_FIELD_LIST_DCN35(uint32_t); }; +void dpp35_dppclk_control( + struct dpp *dpp_base, + bool dppclk_div, + bool enable); + bool dpp35_construct(struct dcn3_dpp *dpp3, struct dc_context *ctx, uint32_t inst, const struct dcn3_dpp_registers *tf_regs, const struct dcn35_dpp_shift *tf_shift, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 629a702bb3fa..9499295f4582 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -495,6 +495,17 @@ void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, } } +void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on) +{ + if (!hws->ctx->dc->debug.root_clock_optimization.bits.dpstream) + return; + + if (hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating) { + hws->ctx->dc->res_pool->dccg->funcs->set_dpstreamclk_root_clock_gating( + hws->ctx->dc->res_pool->dccg, dp_hpo_inst, clock_on); + } +} + void dcn35_dsc_pg_control( struct dce_hwseq *hws, unsigned int dsc_inst, @@ -1002,6 +1013,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; + + if (pipe_ctx->stream_res.hpo_dp_stream_enc) + update_state->pg_pipe_res_update[PG_DPSTREAM][pipe_ctx->stream_res.hpo_dp_stream_enc->inst] = false; } /*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/ for (i = 0; i < dc->res_pool->timing_generator_count; i++) { @@ -1059,6 +1073,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, if (j == PG_OPTC && new_pipe->stream_res.tg) update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true; + + if (j == PG_DPSTREAM && new_pipe->stream_res.hpo_dp_stream_enc) + update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true; } } else if (cur_pipe->plane_state == new_pipe->plane_state || cur_pipe == new_pipe) { @@ -1088,6 +1105,11 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, cur_pipe->stream_res.tg != new_pipe->stream_res.tg && new_pipe->stream_res.tg) update_state->pg_pipe_res_update[j][new_pipe->stream_res.tg->inst] = true; + + if (j == PG_DPSTREAM && + cur_pipe->stream_res.hpo_dp_stream_enc != new_pipe->stream_res.hpo_dp_stream_enc && + new_pipe->stream_res.hpo_dp_stream_enc) + update_state->pg_pipe_res_update[j][new_pipe->stream_res.hpo_dp_stream_enc->inst] = true; } } } @@ -1234,6 +1256,9 @@ void dcn35_root_clock_control(struct dc *dc, if (dc->hwseq->funcs.dpp_root_clock_control) dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); } + if (update_state->pg_pipe_res_update[PG_DPSTREAM][i]) + if (dc->hwseq->funcs.dpstream_root_clock_control) + dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); } for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { if (update_state->pg_pipe_res_update[PG_DSC][i]) { @@ -1254,6 +1279,9 @@ void 
dcn35_root_clock_control(struct dc *dc, if (dc->hwseq->funcs.dpp_root_clock_control) dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on); } + if (update_state->pg_pipe_res_update[PG_DPSTREAM][i]) + if (dc->hwseq->funcs.dpstream_root_clock_control) + dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); } } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h index 68f714143431..a731c8880d60 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h @@ -37,6 +37,8 @@ void dcn35_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool pow void dcn35_dpp_root_clock_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); +void dcn35_dpstream_root_clock_control(struct dce_hwseq *hws, unsigned int dp_hpo_inst, bool clock_on); + void dcn35_enable_power_gating_plane(struct dce_hwseq *hws, bool enable); void dcn35_set_dmu_fgcg(struct dce_hwseq *hws, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c index 19b861349954..df3bf77f3fb4 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c @@ -147,6 +147,7 @@ static const struct hwseq_private_funcs dcn35_private_funcs = { //.hubp_pg_control = dcn35_hubp_pg_control, .enable_power_gating_plane = dcn35_enable_power_gating_plane, .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn35_update_odm, .set_hdr_multiplier = dcn10_set_hdr_multiplier, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h index 554cfab5ab24..341219cf4144 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h @@ -120,6 +120,10 @@ struct hwseq_private_funcs { struct dce_hwseq *hws, unsigned int dpp_inst, bool clock_on); + void (*dpstream_root_clock_control)( + struct dce_hwseq *hws, + unsigned int dpp_inst, + bool clock_on); void (*dpp_pg_control)(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index 722eff84ccfd..d4c7885fc916 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -106,6 +106,10 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); + void (*set_dpstreamclk_root_clock_gating)( + struct dccg *dccg, + int dp_hpo_inst, + bool enable); void (*set_dpstreamclk)( struct dccg *dccg, -- cgit v1.2.3-70-g09d2 From af8999c57ab58faa13fcc6bde7cd1eac6b3ffa74 Mon Sep 17 00:00:00 2001 From: Leon Huang Date: Mon, 4 Mar 2024 16:52:25 +0800 Subject: drm/amd/display: Expand supported Replay residency mode [Why] Dmub provides several Replay residency calculation methods, but current interface only supports either ALPM or PHY mode [How] Modify the interface for supporting different types of Replay residency calculation. 
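Illustration only (not part of the patch): a minimal standalone sketch of why swapping the bool for an enum scales to more residency methods. The enumerator names below are placeholders; the real enum pr_residency_mode lives in the DC headers and is not shown in this diff.

  #include <stdio.h>
  #include <stdbool.h>

  /* Placeholder names for the demo; the real enum pr_residency_mode may
   * use different identifiers and carry more entries. */
  enum pr_residency_mode {
          PR_RESIDENCY_MODE_PHY,
          PR_RESIDENCY_MODE_ALPM,
          /* room for further DMUB-provided methods, no extra bool needed */
  };

  /* old shape: a single bool can only ever select between two methods */
  static unsigned int residency_old(bool is_alpm)
  {
          return is_alpm ? 97 : 95;       /* dummy percentages */
  }

  /* new shape: one parameter selects whichever method DMUB supports */
  static unsigned int residency_new(enum pr_residency_mode mode)
  {
          switch (mode) {
          case PR_RESIDENCY_MODE_PHY:  return 95;  /* dummy */
          case PR_RESIDENCY_MODE_ALPM: return 97;  /* dummy */
          default:                     return 0;
          }
  }

  int main(void)
  {
          printf("old, alpm: %u%%\n", residency_old(true));
          printf("new, alpm: %u%%\n", residency_new(PR_RESIDENCY_MODE_ALPM));
          return 0;
  }
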
Reviewed-by: Robin Chen Acked-by: Roman Li Signed-off-by: Leon Huang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/link.h | 2 +- .../gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 7 +++++-- .../gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index bf29fc58ea6a..7ab8ba5e23ed 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -288,7 +288,7 @@ struct link_service { struct dc_link *link, uint32_t coasting_vtotal); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, - const bool is_alpm); + const enum pr_residency_mode mode); bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, const unsigned int *power_opts, uint32_t coasting_vtotal); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index 0682dbbad448..689c5fb44e86 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -1056,7 +1056,7 @@ bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) } bool edp_replay_residency(const struct dc_link *link, - unsigned int *residency, const bool is_start, const bool is_alpm) + unsigned int *residency, const bool is_start, const enum pr_residency_mode mode) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1065,8 +1065,11 @@ bool edp_replay_residency(const struct dc_link *link, if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) return false; + if (!residency) + return false; + if (replay != NULL && link->replay_settings.replay_feature_enabled) - replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm); + replay->funcs->replay_residency(replay, panel_inst, residency, is_start, mode); else *residency = 0; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index eee8a4db6f85..cb6d95cc36e4 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -61,7 +61,7 @@ bool edp_send_replay_cmd(struct dc_link *link, union dmub_replay_cmd_set *cmd_data); bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); bool edp_replay_residency(const struct dc_link *link, - unsigned int *residency, const bool is_start, const bool is_alpm); + unsigned int *residency, const bool is_start, const enum pr_residency_mode mode); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, const unsigned int *power_opts, uint32_t coasting_vtotal); -- cgit v1.2.3-70-g09d2 From 029faefb7302f1079173410697b0e14d2e56e19a Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 28 Mar 2024 18:22:10 +0800 Subject: drm/amdgpu: implement IRQ_STATE_ENABLE for SDMA v4.4.2 SDMA_CNTL is not set in some cases, driver configures it by itself. 
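Side note, illustration only: the hunk below collapses the enable/disable switch into one read-modify-write via REG_SET_FIELD. A standalone sketch of that pattern follows, with a made-up bit position; the real DRAM_ECC_INT_ENABLE offset comes from the SDMA register headers.

  #include <stdio.h>
  #include <stdint.h>

  /* Made-up field layout, for the demo only. */
  #define DEMO_ECC_INT_ENABLE_SHIFT 10
  #define DEMO_ECC_INT_ENABLE_MASK  (1u << DEMO_ECC_INT_ENABLE_SHIFT)

  /* The same read-modify-write that REG_SET_FIELD performs: clear the
   * field, then OR in the new value, so one statement handles both
   * enable and disable. */
  static uint32_t demo_set_field(uint32_t reg, uint32_t val)
  {
          return (reg & ~DEMO_ECC_INT_ENABLE_MASK) |
                 ((val << DEMO_ECC_INT_ENABLE_SHIFT) & DEMO_ECC_INT_ENABLE_MASK);
  }

  int main(void)
  {
          uint32_t sdma_cntl = 0x00000400;  /* pretend the bit was set */
          int state_enable = 0;             /* stands in for AMDGPU_IRQ_STATE_DISABLE */

          sdma_cntl = demo_set_field(sdma_cntl, state_enable ? 1 : 0);
          printf("SDMA_CNTL after update: 0x%08x\n", sdma_cntl);
          return 0;
  }
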
v2: simplify code Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 71c2f50530cb..f8e2cd514493 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1602,19 +1602,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL); - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, - DRAM_ECC_INT_ENABLE, 0); - WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); - break; - /* sdma ecc interrupt is enabled by default - * driver doesn't need to do anything to - * enable the interrupt */ - case AMDGPU_IRQ_STATE_ENABLE: - default: - break; - } + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); return 0; } -- cgit v1.2.3-70-g09d2 From 8b2cb32cf0c613fd937ebb49a331798985f50826 Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Mon, 11 Mar 2024 18:18:34 -0400 Subject: drm/amd/display: FEC overhead should be checked once for mst slot nums [Why] Mst slot nums equals to pbn / pbn_div. Today, pbn_div refers to dm_mst_get_pbn_divider -> dc_link_bandwidth_kbps. In dp_link_bandwidth_kbps, which includes effect of FEC overhead already. As result, we should not include effect of FEC overhead again while calculating pbn by kpbs_to_peak_pbn (stream_kbps). [How] Include FEC overhead within dp_link_bandwidth_kbps. Remove FEC overhead from kbps_to_peak_pbn. 
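For reference, a standalone sketch of the arithmetic (not driver code): the 1006/1000 margin and the removed 8b/10b multiplier 1031/1000 are taken from the hunks below, while the example stream bandwidth is arbitrary. Applying the FEC factor a second time inflates the peak PBN and therefore the computed slot count.

  #include <stdio.h>
  #include <stdint.h>

  #define PBN_DIVISOR (54 * 8 * 1000)   /* 1 PBN = 54/64 MBps granularity */

  static int peak_pbn(uint64_t kbps, unsigned int fec_x1000)
  {
          /* mirrors kbps_to_peak_pbn(): margin, optional FEC factor, round up */
          uint64_t peak = kbps * 1006ULL * fec_x1000 / (1000ULL * 1000ULL);

          return (int)((peak * 64 + PBN_DIVISOR - 1) / PBN_DIVISOR);
  }

  int main(void)
  {
          uint64_t stream_kbps = 3000000;  /* example payload, ~3 Gbps */

          /* old: 1031/1000 applied on top of a link bandwidth that already
           * accounts for FEC; new: the extra factor is dropped (1000/1000) */
          printf("pbn with double-counted FEC: %d\n", peak_pbn(stream_kbps, 1031));
          printf("pbn without it:              %d\n", peak_pbn(stream_kbps, 1000));
          return 0;
  }
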
Reviewed-by: Wayne Lin Acked-by: Roman Li Signed-off-by: Hersen Wu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 37 ++++++---------------- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 3 -- 2 files changed, 10 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index ad3170b72a47..0b03e659fdf3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -791,25 +791,12 @@ struct dsc_mst_fairness_params { struct amdgpu_dm_connector *aconnector; }; -static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) -{ - u8 link_coding_cap; - uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; - - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); - if (link_coding_cap == DP_128b_132b_ENCODING) - fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; - - return fec_overhead_multiplier_x1000; -} - -static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) +static int kbps_to_peak_pbn(int kbps) { u64 peak_kbps = kbps; peak_kbps *= 1006; - peak_kbps *= fec_overhead_multiplier_x1000; - peak_kbps = div_u64(peak_kbps, 1000 * 1000); + peak_kbps = div_u64(peak_kbps, 1000); return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); } @@ -910,12 +897,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state, int link_timeslots_used; int fair_pbn_alloc; int ret = 0; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = - kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; + kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i + k].pbn; bpp_increased[i] = false; remaining_to_increase += 1; } else { @@ -1011,7 +997,6 @@ static int try_disable_dsc(struct drm_atomic_state *state, int next_index; int remaining_to_try = 0; int ret; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled @@ -1041,7 +1026,7 @@ static int try_disable_dsc(struct drm_atomic_state *state, if (next_index == -1) break; - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1054,7 +1039,8 @@ static int try_disable_dsc(struct drm_atomic_state *state, vars[next_index].dsc_enabled = false; vars[next_index].bpp_x16 = 0; } else { - vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000); + vars[next_index].pbn = kbps_to_peak_pbn( + params[next_index].bw_range.max_kbps); ret = drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, @@ -1083,7 +1069,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, int count = 0; int i, k, ret; bool debugfs_overwrite = false; - uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); memset(params, 0, sizeof(params)); @@ -1148,7 +1133,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state 
*state, /* Try no compression */ for (i = 0; i < count; i++) { vars[i + k].aconnector = params[i].aconnector; - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, @@ -1167,7 +1152,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, /* Try max compression */ for (i = 0; i < count; i++) { if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1175,7 +1160,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, if (ret < 0) return ret; } else { - vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, @@ -1656,13 +1641,11 @@ enum dc_status dm_dp_mst_is_port_support_mode( */ int pbn_div, slot_num, max_slot_num; enum dc_link_encoding_format link_encoding; - uint16_t fec_overhead_multiplier_x1000 = - get_fec_overhead_multiplier(stream->link); uint32_t stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(stream->link)); - pbn = kbps_to_peak_pbn(stream_kbps, fec_overhead_multiplier_x1000); + pbn = kbps_to_peak_pbn(stream_kbps); pbn_div = dm_mst_get_pbn_divider(stream->link); slot_num = DIV_ROUND_UP(pbn, pbn_div); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 37c820ab0fdb..fa84d34b7373 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -46,9 +46,6 @@ #define SYNAPTICS_CASCADED_HUB_ID 0x5A #define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0) -#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 -#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 - enum mst_msg_ready_type { NONE_MSG_RDY_EVENT = 0, DOWN_REP_MSG_RDY_EVENT = 1, -- cgit v1.2.3-70-g09d2 From 60df5628144b59d5876f8ceac624a7661c336665 Mon Sep 17 00:00:00 2001 From: Joshua Aberback Date: Fri, 15 Mar 2024 04:07:42 -0400 Subject: drm/amd/display: handle invalid connector indices [Why] The function to count the number of valid connectors does not guarantee that the first n indices are valid, only that there exist n valid indices. When invalid indices are present, this results in later valid connectors being missed, as processing would end after checking n indices. 
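A toy sketch of that failure mode (illustration only, not driver code), assuming a 12-entry link table (MAX_LINKS in the hunks below) with an invalid entry at index 1; the fixed loop shape matches the dc.c hunk:

  #include <stdio.h>

  #define MAX_LINKS 12    /* MAX_PIPES * 2, as in the header hunk below */

  int main(void)
  {
          /* three valid connectors, but index 1 is a hole */
          int valid[MAX_LINKS] = { 1, 0, 1, 1 };
          int connectors_num = 3, i, found;

          /* old loop: checks only the first connectors_num indices */
          for (i = 0, found = 0; i < connectors_num; i++)
                  found += valid[i];
          printf("old loop finds %d of %d connectors\n", found, connectors_num);

          /* new loop: runs until enough valid entries have been found */
          for (i = 0, found = 0; found < connectors_num && i < MAX_LINKS; i++)
                  found += valid[i];
          printf("new loop finds %d of %d connectors\n", found, connectors_num);
          return 0;
  }
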
[How] - count valid indices separately from total indices examined - add explicit definition of MAX_LINKS Reviewed-by: Dillon Varone Acked-by: Roman Li Signed-off-by: Joshua Aberback Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 2 +- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 2 +- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 ++- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 1 + drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c | 4 ++-- 8 files changed, 10 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 5ee87965a078..bb4f3bd7532e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -503,7 +503,7 @@ static void dcn2_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index e3e1940198a9..f65bb4c21b7d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -548,7 +548,7 @@ static void rn_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct dc_l clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3271c8c7905d..4cb0db0ed92f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -474,7 +474,7 @@ static void dcn30_notify_link_rate_change(struct clk_mgr *clk_mgr_base, struct d clk_mgr->cur_phyclk_req_table[link->link_index] = link->cur_link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - for (i = 0; i < MAX_PIPES * 2; i++) { + for (i = 0; i < MAX_LINKS; i++) { if (clk_mgr->cur_phyclk_req_table[i] > max_phyclk_req) max_phyclk_req = clk_mgr->cur_phyclk_req_table[i]; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 667655d0e5b9..c3510cdd0ec8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -212,7 +212,8 @@ static bool create_links( connectors_num, num_virtual_links); - for (i = 0; i < connectors_num; i++) { + // condition loop on link_count to allow skipping invalid indices + for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h 
b/drivers/gpu/drm/amd/display/dc/dc.h index 29fd8daa9d15..3ed41cf6a59d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1327,7 +1327,7 @@ struct dc { struct dc_phy_addr_space_config vm_pa_config; uint8_t link_count; - struct dc_link *links[MAX_PIPES * 2]; + struct dc_link *links[MAX_LINKS]; struct link_service *link_srv; struct dc_state *current_state; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index f4d4a68c91dc..4ba18ea57aad 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -349,7 +349,7 @@ struct clk_mgr_internal { enum dm_pp_clocks_state cur_min_clks_state; bool periodic_retraining_disabled; - unsigned int cur_phyclk_req_table[MAX_PIPES * 2]; + unsigned int cur_phyclk_req_table[MAX_LINKS]; bool smu_present; void *wm_range_table; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index c1835ad6550f..c80ebb407add 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -44,6 +44,7 @@ */ #define MAX_PIPES 6 #define MAX_PHANTOM_PIPES (MAX_PIPES / 2) +#define MAX_LINKS (MAX_PIPES * 2) #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 5491b707cec8..68a8fd7f84d0 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -166,7 +166,7 @@ static uint8_t get_lowest_dpia_index(struct dc_link *link) uint8_t idx = 0xFF; int i; - for (i = 0; i < MAX_PIPES * 2; ++i) { + for (i = 0; i < MAX_LINKS; ++i) { if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) @@ -196,7 +196,7 @@ static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_in struct dc_link *link_dpia_primary, *link_dpia_secondary; int total_bw = 0; - for (uint8_t i = 0; i < (MAX_PIPES * 2) - 1; ++i) { + for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) { if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) continue; -- cgit v1.2.3-70-g09d2 From 0453e5f2202ed77e470176f97c5d1436476cf63e Mon Sep 17 00:00:00 2001 From: ZhenGuo Yin Date: Fri, 29 Mar 2024 15:41:58 +0800 Subject: drm/amdgpu: select HDP ref/mask according to gfx ring pipe Use correct ref/mask for differnent gfx ring pipe. This should fix the gfx hang issue after enabling gfx pipe1. 
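Illustrative only: the fix shifts the pipe-0 reference/mask by the ring's pipe index, which relies on the per-pipe CP flush-done bits sitting in consecutive bit positions. The bit position below is a placeholder, not the real GPU_HDP_FLUSH_DONE layout from the NBIO headers.

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* placeholder bit position; the real mask comes from the NBIO regs */
          uint32_t ref_and_mask_cp0 = 1u << 0;

          for (unsigned int pipe = 0; pipe < 2; pipe++)
                  printf("gfx pipe %u polls HDP flush done mask 0x%08x\n",
                         pipe, ref_and_mask_cp0 << pipe);
          return 0;
  }
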
Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2117 Reviewed-by: Alex Deucher Signed-off-by: ZhenGuo Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d524f1a353ed..a01320e72653 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -8317,7 +8317,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } reg_mem_engine = 0; } else { - ref_and_mask = nbio_hf_reg->ref_and_mask_cp0; + ref_and_mask = nbio_hf_reg->ref_and_mask_cp0 << ring->pipe; reg_mem_engine = 1; /* pfp */ } -- cgit v1.2.3-70-g09d2 From b7a1a0ef12b81957584fef7b61e2d5ec049c7209 Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Mon, 6 Jun 2022 13:59:13 +0530 Subject: drm/amd/amdgpu: add pipe1 hardware support Enable pipe1 support starting from SIENNA CICHLID asic Link: https://gitlab.freedesktop.org/drm/amd/-/issues/2117 Reviewed-by: Alex Deucher Signed-off-by: Arunpravin Paneer Selvam Signed-off-by: ZhenGuo Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index a01320e72653..d5b924222903 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4518,7 +4518,7 @@ static int gfx_v10_0_sw_init(void *handle) case IP_VERSION(10, 3, 3): case IP_VERSION(10, 3, 7): adev->gfx.me.num_me = 1; - adev->gfx.me.num_pipe_per_me = 1; + adev->gfx.me.num_pipe_per_me = 2; adev->gfx.me.num_queue_per_pipe = 1; adev->gfx.mec.num_mec = 2; adev->gfx.mec.num_pipe_per_mec = 4; -- cgit v1.2.3-70-g09d2 From 3e2dacca540643ee35e3deb1d60873e7138a6af3 Mon Sep 17 00:00:00 2001 From: Danijel Slivka Date: Wed, 27 Mar 2024 23:56:23 +0100 Subject: drm/amdgpu: use vm_update_mode=0 as default in sriov for gfx10.3 onwards Apply this rule to all newer asics in sriov case. For asic with VF MMIO access protection avoid using CPU for VM table updates. CPU pagetable updates have issues with HDP flush as VF MMIO access protection blocks write to BIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL register during sriov runtime. Moved the check to amdgpu_device_init() to ensure it is done after amdgpu_device_ip_early_init() where the IP versions are discovered. 
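A quick sketch of the new gate (illustration only): any GC IP at or above 10.3.0 on an SR-IOV VF is now treated as MMIO-write protected, instead of matching CHIP_SIENNA_CICHLID alone. The maj/min/rev packing below is an assumption made for the demo, intended to mirror the driver's IP_VERSION() macro, and the sample versions are just examples.

  #include <stdio.h>

  #define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

  int main(void)
  {
          unsigned int gc_vers[] = {
                  IP_VERSION(10, 1, 10),  /* below the cutoff */
                  IP_VERSION(10, 3, 0),   /* Sienna Cichlid generation */
                  IP_VERSION(11, 0, 0),   /* newer ASICs now covered as well */
          };
          int is_sriov_vf = 1;            /* example only */

          for (int i = 0; i < 3; i++)
                  printf("gc 0x%06x -> VF MMIO access protect: %s\n", gc_vers[i],
                         (is_sriov_vf && gc_vers[i] >= IP_VERSION(10, 3, 0)) ?
                         "yes" : "no");
          return 0;
  }
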
Signed-off-by: Danijel Slivka Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6 ------ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 12dc71a6b5db..66a5979e075d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4072,6 +4072,13 @@ int amdgpu_device_init(struct amdgpu_device *adev, /* Enable TMZ based on IP_VERSION */ amdgpu_gmc_tmz_set(adev); + if (amdgpu_sriov_vf(adev) && + amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0)) + /* VF MMIO access (except mailbox range) from CPU + * will be blocked during sriov runtime + */ + adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; + amdgpu_gmc_noretry_set(adev); /* Need to get xgmi info early to decide the reset behavior*/ if (adev->gmc.xgmi.supported) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index aed60aaf1a55..6f01de220c44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -724,12 +724,6 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } - if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) - /* VF MMIO access (except mailbox range) from CPU - * will be blocked during sriov runtime - */ - adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; - /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { switch (adev->asic_type) { -- cgit v1.2.3-70-g09d2 From 89e5f42c049576340cae778ed7baa81d1e66b318 Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Fri, 15 Mar 2024 18:00:28 -0400 Subject: drm/amd/display: Add dmub additional interface support for FAMS [WHY&HOW] Update dmub and driver interface for future FAMS revisions. 
Reviewed-by: Anthony Koo Acked-by: Roman Li Signed-off-by: Dillon Varone Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 6 +++++- 5 files changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index af3a26c2656b..1f3ddee1ec1b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -799,7 +799,7 @@ void dcn30_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index a760f0c6fe98..9ab475a87545 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -273,7 +273,7 @@ void dcn31_init_hw(struct dc *dc) // Get DMCUB capabilities dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } void dcn31_dsc_pg_control( diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 0f522f8a7228..9aea4a088652 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -953,7 +953,7 @@ void dcn32_init_hw(struct dc *dc) dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support; dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; if (dc->ctx->dmub_srv->dmub->fw_version < DMUB_FW_VERSION(7, 0, 35)) { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index 9499295f4582..c2275a8b4ecc 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -349,7 +349,7 @@ void dcn35_init_hw(struct dc *dc) if (dc->ctx->dmub_srv) { dc_dmub_srv_query_caps_cmd(dc->ctx->dmub_srv); dc->caps.dmub_caps.psr = dc->ctx->dmub_srv->dmub->feature_caps.psr; - dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch; + dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; } if (dc->res_pool->pg_cntl) { diff --git 
a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 3bd9911b6f3a..d0b581b8331c 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -97,6 +97,9 @@ /* Maximum number of planes on any ASIC. */ #define DMUB_MAX_PLANES 6 +/* Maximum number of phantom planes on any ASIC */ +#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2) + /* Trace buffer offset for entry */ #define TRACE_BUFFER_ENTRY_OFFSET 16 @@ -466,7 +469,7 @@ struct dmub_feature_caps { * Max PSR version supported by FW. */ uint8_t psr; - uint8_t fw_assisted_mclk_switch; + uint8_t fw_assisted_mclk_switch_ver; uint8_t reserved[4]; uint8_t subvp_psr_support; uint8_t gecc_enable; @@ -4667,6 +4670,7 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. */ struct dmub_rb_cmd_assr_enable assr_enable; + }; /** -- cgit v1.2.3-70-g09d2 From 5db346c256bbacc634ff515a1a9202cd4b61d8c7 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 19 Mar 2024 10:06:51 -0400 Subject: drm/amd/display: update pipe topology log to support subvp [why] There is an ambiguity in subvp pipe topology log. The log doesn't show subvp relation to main stream and it is not clear that certain stream is an internal stream for subvp pipes. [how] Separate subvp pipe topology logging from main pipe topology. Log main stream indices instead of the internal stream for subvp pipes. The following is a sample log showing 2 streams with subvp enabled on both: pipe topology update ________________________ | plane0 slice0 stream0| |DPP1----OPP1----OTG1----| | plane0 slice0 stream1| |DPP0----OPP0----OTG0----| | (phantom pipes) | | plane0 slice0 stream0| |DPP3----OPP3----OTG3----| | plane0 slice0 stream1| |DPP2----OPP2----OTG2----| |________________________| Reviewed-by: Alvin Lee Acked-by: Roman Li Signed-off-by: Wenjing Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 96 +++++++++++++++-------- 1 file changed, 65 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 601af21b2df9..c4a3484554b0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2193,50 +2193,84 @@ static void resource_log_pipe(struct dc *dc, struct pipe_ctx *pipe, } } -void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) +static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state, + struct pipe_ctx *otg_master, int stream_idx) { - struct pipe_ctx *otg_master; struct pipe_ctx *opp_heads[MAX_PIPES]; struct pipe_ctx *dpp_pipes[MAX_PIPES]; - int stream_idx, slice_idx, dpp_idx, plane_idx, slice_count, dpp_count; + int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count; bool is_primary; DC_LOGGER_INIT(dc->ctx->logger); + slice_count = resource_get_opp_heads_for_otg_master(otg_master, + &state->res_ctx, opp_heads); + for (slice_idx = 0; slice_idx < slice_count; slice_idx++) { + plane_idx = -1; + if (opp_heads[slice_idx]->plane_state) { + dpp_count = resource_get_dpp_pipes_for_opp_head( + opp_heads[slice_idx], + &state->res_ctx, + dpp_pipes); + for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) { + is_primary = !dpp_pipes[dpp_idx]->top_pipe || + dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state; + if (is_primary) + plane_idx++; + 
resource_log_pipe(dc, dpp_pipes[dpp_idx], + stream_idx, slice_idx, + plane_idx, slice_count, + is_primary); + } + } else { + resource_log_pipe(dc, opp_heads[slice_idx], + stream_idx, slice_idx, plane_idx, + slice_count, true); + } + + } +} + +static int resource_stream_to_stream_idx(struct dc_state *state, + struct dc_stream_state *stream) +{ + int i, stream_idx = -1; + + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] == stream) { + stream_idx = i; + break; + } + return stream_idx; +} + +void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state) +{ + struct pipe_ctx *otg_master; + int stream_idx, phantom_stream_idx; + DC_LOGGER_INIT(dc->ctx->logger); + DC_LOG_DC(" pipe topology update"); DC_LOG_DC(" ________________________"); for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) { + if (state->streams[stream_idx]->is_phantom) + continue; + otg_master = resource_get_otg_master_for_stream( &state->res_ctx, state->streams[stream_idx]); - if (!otg_master || otg_master->stream_res.tg == NULL) { - DC_LOG_DC("topology update: otg_master NULL stream_idx %d!\n", stream_idx); - return; - } - slice_count = resource_get_opp_heads_for_otg_master(otg_master, - &state->res_ctx, opp_heads); - for (slice_idx = 0; slice_idx < slice_count; slice_idx++) { - plane_idx = -1; - if (opp_heads[slice_idx]->plane_state) { - dpp_count = resource_get_dpp_pipes_for_opp_head( - opp_heads[slice_idx], - &state->res_ctx, - dpp_pipes); - for (dpp_idx = 0; dpp_idx < dpp_count; dpp_idx++) { - is_primary = !dpp_pipes[dpp_idx]->top_pipe || - dpp_pipes[dpp_idx]->top_pipe->plane_state != dpp_pipes[dpp_idx]->plane_state; - if (is_primary) - plane_idx++; - resource_log_pipe(dc, dpp_pipes[dpp_idx], - stream_idx, slice_idx, - plane_idx, slice_count, - is_primary); - } - } else { - resource_log_pipe(dc, opp_heads[slice_idx], - stream_idx, slice_idx, plane_idx, - slice_count, true); - } + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); + } + if (state->phantom_stream_count > 0) { + DC_LOG_DC(" | (phantom pipes) |"); + for (stream_idx = 0; stream_idx < state->stream_count; stream_idx++) { + if (state->stream_status[stream_idx].mall_stream_config.type != SUBVP_MAIN) + continue; + phantom_stream_idx = resource_stream_to_stream_idx(state, + state->stream_status[stream_idx].mall_stream_config.paired_stream); + otg_master = resource_get_otg_master_for_stream( + &state->res_ctx, state->streams[phantom_stream_idx]); + resource_log_pipe_for_stream(dc, state, otg_master, stream_idx); } } DC_LOG_DC(" |________________________|\n"); -- cgit v1.2.3-70-g09d2 From 14f9db4271ef5c78ae87237af844f03fb192d139 Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Tue, 19 Mar 2024 13:08:52 -0400 Subject: drm/amd/display: Enable DTBCLK DTO earlier in the sequence [why] As per programming guide, we need to enable the virtual pixel clock via DTBCLK DTO and ungate the clock before we begin programming OPP/OPTC control registers. Otherwise, the double-buffered registers will be left pending until the clocks are enabled. [how] Move the DTBCLK DTO programming up to where we do the legacy DP DTO programming. 
Reviewed-by: Nicholas Kazlauskas Acked-by: Roman Li Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 32 +++++++++++----------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index f983041ce9a4..87b43cb50c1e 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -873,6 +873,22 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } + if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { + struct dccg *dccg = dc->res_pool->dccg; + struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + + if (dccg->funcs->set_dtbclk_p_src) + dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst); + + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } + if (dc_is_hdmi_tmds_signal(stream->signal)) { stream->link->phy_state.symclk_ref_cnts.otg = 1; if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF) @@ -959,22 +975,6 @@ enum dc_status dcn20_enable_stream_timing( pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg); } - if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx)) { - struct dccg *dccg = dc->res_pool->dccg; - struct timing_generator *tg = pipe_ctx->stream_res.tg; - struct dtbclk_dto_params dto_params = {0}; - - if (dccg->funcs->set_dtbclk_p_src) - dccg->funcs->set_dtbclk_p_src(dccg, DTBCLK0, tg->inst); - - dto_params.otg_inst = tg->inst; - dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10; - dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); - dto_params.timing = &pipe_ctx->stream->timing; - dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); - } - return DC_OK; } -- cgit v1.2.3-70-g09d2 From beb9764aad3c2b2c14bf957221f42e326d097680 Mon Sep 17 00:00:00 2001 From: George Shen Date: Fri, 8 Mar 2024 15:08:09 -0500 Subject: drm/amd/display: Add dummy interface for tracing DCN32 SMU messages [Why/How] Some issues may require a trace of the previous SMU messages from DC to understand the context and aid in debugging. Actual logging to be implemented when needed. 
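For illustration only (not part of this patch): the hooks below land as empty stubs, and a DM could later wire them to the kernel log with something as minimal as the following sketch. pr_debug() is used purely as a placeholder sink; the signatures are taken from the interface added in dm_services.h.

/* Minimal sketch, not in this patch: route SMU traffic to the kernel log. */
void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx)
{
	pr_debug("dc: SMU msg_id 0x%08x param_in 0x%08x\n", msg_id, param_in);
}

void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx)
{
	pr_debug("dc: SMU response delay %u us\n", delay);
}

The delay value is reported in microseconds, matching the delay_us accumulation done at the TRACE_SMU_DELAY() call sites in the clk_mgr changes below.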
Reviewed-by: Josip Pavic Acked-by: Roman Li Signed-off-by: George Shen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 8 ++++++++ .../drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 5 ++++- .../drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c | 9 +++++++++ drivers/gpu/drm/amd/display/dc/dm_services.h | 10 ++++++++++ 4 files changed, 31 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index d9e33c6bccd9..0005f5f8f34f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -52,4 +52,12 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc func_name, line); } +void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx) +{ +} + +void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx) +{ +} + /**** power component interfaces ****/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index bdbf18306698..64c2b88dfc9f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -54,6 +54,7 @@ */ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t reg = 0; do { @@ -69,7 +70,7 @@ static uint32_t dcn30_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un /* handle DALSMC_Result_CmdRejectedBusy? */ - /* Log? 
*/ + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); return reg; } @@ -89,6 +90,8 @@ static bool dcn30_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + result = dcn30_smu_wait_for_response(clk_mgr, 10, 200000); if (IS_SMU_TIMEOUT(result)) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c index df244b175fdb..f2f60478b1a6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c @@ -49,6 +49,7 @@ */ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsigned int delay_us, unsigned int max_retries) { + const uint32_t initial_max_retries = max_retries; uint32_t reg = 0; do { @@ -62,6 +63,8 @@ static uint32_t dcn32_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un udelay(delay_us); } while (max_retries--); + TRACE_SMU_DELAY(delay_us * (initial_max_retries - max_retries), clk_mgr->base.ctx); + return reg; } @@ -79,6 +82,8 @@ static bool dcn32_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, uint /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response(clk_mgr, 10, 200000) == DALSMC_Result_OK) { if (param_out) @@ -115,6 +120,8 @@ static uint32_t dcn32_smu_wait_for_response_delay(struct clk_mgr_internal *clk_m *total_delay_us += delay_us; } while (max_retries--); + TRACE_SMU_DELAY(*total_delay_us, clk_mgr->base.ctx); + return reg; } @@ -135,6 +142,8 @@ static bool dcn32_smu_send_msg_with_param_delay(struct clk_mgr_internal *clk_mgr /* Trigger the message transaction by writing the message ID */ REG_WRITE(DAL_MSG_REG, msg_id); + TRACE_SMU_MSG(msg_id, param_in, clk_mgr->base.ctx); + /* Wait for response */ if (dcn32_smu_wait_for_response_delay(clk_mgr, 10, 200000, &delay2_us) == DALSMC_Result_OK) { if (param_out) diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index d0eed3b4771e..9405c47ee2a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -274,6 +274,16 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc #define PERF_TRACE() dm_perf_trace_timestamp(__func__, __LINE__, CTX) #define PERF_TRACE_CTX(__CTX) dm_perf_trace_timestamp(__func__, __LINE__, __CTX) +/* + * SMU message tracing + */ +void dm_trace_smu_msg(uint32_t msg_id, uint32_t param_in, struct dc_context *ctx); +void dm_trace_smu_delay(uint32_t delay, struct dc_context *ctx); + +#define TRACE_SMU_MSG(msg_id, param_in, ctx) dm_trace_smu_msg(msg_id, param_in, ctx) +#define TRACE_SMU_DELAY(response_delay, ctx) dm_trace_smu_delay(response_delay, ctx) + + /* * DMUB Interfaces */ -- cgit v1.2.3-70-g09d2 From dbfb51d1d4e09b572478288d45091f7505a1926d Mon Sep 17 00:00:00 2001 From: Daniel Miess Date: Fri, 15 Mar 2024 15:26:33 -0400 Subject: drm/amd/display: Enable RCO for HDMISTREAMCLK in DCN35 [Why & How] Enable root clock optimization for HDMISTREAMCLK and only disable it when it's actively being used. 
Reviewed-by: Charlene Liu Acked-by: Roman Li Signed-off-by: Daniel Miess Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3ed41cf6a59d..b026004b713a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -696,6 +696,7 @@ enum pg_hw_pipe_resources { PG_OPP, PG_OPTC, PG_DPSTREAM, + PG_HDMISTREAM, PG_HW_PIPE_RESOURCES_NUM_ELEMENT }; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index c2275a8b4ecc..b94a85380d73 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -988,6 +988,9 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (!hpo_frl_stream_enc_acquired && !hpo_dp_stream_enc_acquired) update_state->pg_res_update[PG_HPO] = true; + if (hpo_frl_stream_enc_acquired) + update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; + update_state->pg_res_update[PG_DWB] = true; for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1125,6 +1128,9 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, if (hpo_frl_stream_enc_acquired || hpo_dp_stream_enc_acquired) update_state->pg_res_update[PG_HPO] = true; + if (hpo_frl_stream_enc_acquired) + update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; + } /** @@ -1249,7 +1255,7 @@ void dcn35_root_clock_control(struct dc *dc, if (!pg_cntl) return; /*enable root clock first when power up*/ - if (power_on) + if (power_on) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { @@ -1260,6 +1266,8 @@ void dcn35_root_clock_control(struct dc *dc, if (dc->hwseq->funcs.dpstream_root_clock_control) dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); } + + } for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { if (update_state->pg_pipe_res_update[PG_DSC][i]) { if (power_on) { @@ -1272,7 +1280,7 @@ void dcn35_root_clock_control(struct dc *dc, } } /*disable root clock first when power down*/ - if (!power_on) + if (!power_on) { for (i = 0; i < dc->res_pool->pipe_count; i++) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { @@ -1283,6 +1291,8 @@ void dcn35_root_clock_control(struct dc *dc, if (dc->hwseq->funcs.dpstream_root_clock_control) dc->hwseq->funcs.dpstream_root_clock_control(dc->hwseq, i, power_on); } + + } } void dcn35_prepare_bandwidth( -- cgit v1.2.3-70-g09d2 From b3f98c00c4157c8c437bcdab91c38aa8961a4722 Mon Sep 17 00:00:00 2001 From: Duncan Ma Date: Fri, 8 Mar 2024 09:28:18 -0500 Subject: drm/amd/display: Allow HPO PG for DCN35 [Why] HPO can be power gated unconditionally for DCN35. [How] Set disable flag to false. 
Reviewed-by: Nicholas Kazlauskas Acked-by: Roman Li Signed-off-by: Duncan Ma Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 91c6eff79282..f1efce1bfbec 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -721,7 +721,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, .disable_optc_power_gate = true, /*should the same as above two*/ - .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/ + .disable_hpo_power_gate = false, /*dmubfw force domain25 on*/ .disable_clock_gate = false, .disable_dsc_power_gate = true, .vsr_support = true, -- cgit v1.2.3-70-g09d2 From cf82a80a1456e9b4323852164a51a24dffc50515 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Fri, 15 Mar 2024 21:25:25 -0600 Subject: drm/amd/display: Skip on writeback when it's not applicable [WHY] dynamic memory safety error detector (KASAN) catches and generates error messages "BUG: KASAN: slab-out-of-bounds" as writeback connector does not support certain features which are not initialized. [HOW] Skip them when connector type is DRM_MODE_CONNECTOR_WRITEBACK. Link: https://gitlab.freedesktop.org/drm/amd/-/issues/3199 Reviewed-by: Harry Wentland Reviewed-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Hung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 5336d3615580..1fe6c8a8d21b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3047,6 +3047,10 @@ static int dm_resume(void *handle) /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) @@ -5926,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, &aconnector->base.probed_modes : &aconnector->base.modes; + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return NULL; + if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; @@ -8767,10 +8774,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; +notify: if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; -notify: aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); -- cgit v1.2.3-70-g09d2 From 75d5f90df1d090f2cd449b6e4f4876ac38c510e5 Mon Sep 17 00:00:00 2001 From: "Leo (Hanghong) Ma" Date: Tue, 19 Mar 2024 13:01:53 -0400 Subject: drm/amd/display: Add OTG check for set AV mute [Why && How] OTG can be disabled before setting dpms on. Add check to skip wait when setting AV mute if OTG is disabled. 
Reviewed-by: Wenjing Liu Acked-by: Roman Li Signed-off-by: Leo (Hanghong) Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 1f3ddee1ec1b..774605a029e8 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -813,7 +813,7 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) enable); /* Wait for two frame to make sure AV mute is sent out */ - if (enable) { + if (enable && pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg)) { pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK); pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE); -- cgit v1.2.3-70-g09d2 From 7eb9d1e0ebad5ece64ccab0606bcf21faee1e6eb Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Wed, 20 Mar 2024 13:27:58 -0400 Subject: drm/amd/display: Add extra logging for HUBP and OTG [Description] Add extra logging for DCSURF_FLIP_CNTL, DCHUBP_CNTL, OTG_MASTER_EN, and OTG_DOUBLE_BUFFER_CONTROL for more debuggability for a system crash. Reviewed-by: Samson Tam Acked-by: Roman Li Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 1 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c | 3 +++ drivers/gpu/drm/amd/display/dc/inc/hw/optc.h | 2 ++ drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 3 +++ 5 files changed, 15 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 09784222cc03..69119b2fdce2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -692,6 +692,7 @@ struct dcn_hubp_state { uint32_t primary_meta_addr_hi; uint32_t uclk_pstate_force; uint32_t hubp_cntl; + uint32_t flip_control; }; struct dcn10_hubp { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 89c3bf0fe0c9..6bba020ad6fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -1331,6 +1331,12 @@ void hubp2_read_state(struct hubp *hubp) SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + if (REG(DCHUBP_CNTL)) + s->hubp_cntl = REG_READ(DCHUBP_CNTL); + + if (REG(DCSURF_FLIP_CONTROL)) + s->flip_control = REG_READ(DCSURF_FLIP_CONTROL); + } static void hubp2_validate_dml_output(struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index 75547ce86c09..60a64d290352 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -455,6 +455,9 @@ void hubp3_read_state(struct hubp *hubp) if (REG(DCHUBP_CNTL)) s->hubp_cntl = REG_READ(DCHUBP_CNTL); + if (REG(DCSURF_FLIP_CONTROL)) + s->flip_control = REG_READ(DCSURF_FLIP_CONTROL); + } void hubp3_setup( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h 
b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h index 9a8bf6ec70ea..8d32e525f05a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h @@ -93,6 +93,8 @@ struct dcn_otg_state { uint32_t vertical_interrupt1_line; uint32_t vertical_interrupt2_en; uint32_t vertical_interrupt2_line; + uint32_t otg_master_update_lock; + uint32_t otg_double_buffer_control; }; void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c index 0e8f4f36c87c..f109a101d84f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c @@ -1383,6 +1383,9 @@ void optc1_read_otg_state(struct optc *optc1, REG_GET(OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, &s->vertical_interrupt2_line); + + s->otg_master_update_lock = REG_READ(OTG_MASTER_UPDATE_LOCK); + s->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL); } bool optc1_get_otg_active_size(struct timing_generator *optc, -- cgit v1.2.3-70-g09d2 From 211a06dfe54aa52f55dcbfdf1fd984038bf60577 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 20 Mar 2024 15:36:09 -0400 Subject: drm/amd/display: Disable Z8 minimum stutter period check for DCN35 [Why] The threshold is no longer useful for blocking suboptimal power states for DCN35 based on real measurement. [How] Reduce to the minimum threshold duration, 1us. Reviewed-by: Gabe Teeger Acked-by: Roman Li Signed-off-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index f1efce1bfbec..479641fedcd4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -764,7 +764,7 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ - .minimum_z8_residency_time = 2100, + .minimum_z8_residency_time = 1, /* Always allow when other conditions are met */ .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, -- cgit v1.2.3-70-g09d2 From de2d1105a3757742b45b0d8270b3c8734cd6b6f8 Mon Sep 17 00:00:00 2001 From: "Xi (Alex) Liu" Date: Wed, 20 Mar 2024 16:44:27 -0400 Subject: drm/amd/display: add root clock control function pointer to fix display corruption [Why and how] External display has corruption because no root clock control function. Add the function pointer to fix the issue. 
Reviewed-by: Daniel Miess Reviewed-by: Nicholas Kazlauskas Acked-by: Roman Li Signed-off-by: Xi (Alex) Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index b5f4bae16177..c54f3518c947 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -145,6 +145,7 @@ static const struct hwseq_private_funcs dcn351_private_funcs = { //.hubp_pg_control = dcn35_hubp_pg_control, .enable_power_gating_plane = dcn35_enable_power_gating_plane, .dpp_root_clock_control = dcn35_dpp_root_clock_control, + .dpstream_root_clock_control = dcn35_dpstream_root_clock_control, .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree, .update_odm = dcn35_update_odm, .set_hdr_multiplier = dcn10_set_hdr_multiplier, -- cgit v1.2.3-70-g09d2 From c435bce6af9b2a277662698875a689c389358f17 Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Thu, 21 Mar 2024 11:06:06 -0400 Subject: drm/amd/display: Add extra DMUB logging to track message timeout [Description] - Add logging for first DMUB inbox message that timed out to diagnostic data - It is useful to track the first failed message for debug purposes because once DMUB becomes hung (typically on a message), it will remain hung and all subsequent messages. In these cases we're interested in knowing which is the first message that failed. Reviewed-by: Josip Pavic Acked-by: Roman Li Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 10 +++++++++- drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 17 ++++++++++++++++- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 1 + drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 1 + drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c | 2 ++ drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 1 + 6 files changed, 30 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 9ae0e602e737..34d199b08dd9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -23,6 +23,7 @@ * */ +#include "dm_services.h" #include "dc.h" #include "dc_dmub_srv.h" #include "../dmub/dmub_srv.h" @@ -198,6 +199,11 @@ bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, if (status != DMUB_STATUS_OK) { DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status); + if (!dmub->debug.timeout_occured) { + dmub->debug.timeout_occured = true; + dmub->debug.timeout_cmd = *cmd_list; + dmub->debug.timestamp = dm_get_timestamp(dc_dmub_srv->ctx); + } dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); return false; } @@ -904,6 +910,7 @@ bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmu void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) { struct dmub_diagnostic_data diag_data = {0}; + uint32_t i; if (!dc_dmub_srv || !dc_dmub_srv->dmub) { DC_LOG_ERROR("%s: invalid parameters.", __func__); @@ -935,7 +942,8 @@ void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) DC_LOG_DEBUG(" scratch [13] : %08x", diag_data.scratch[13]); DC_LOG_DEBUG(" scratch [14] : %08x", diag_data.scratch[14]); DC_LOG_DEBUG(" scratch [15] : %08x", diag_data.scratch[15]); - 
DC_LOG_DEBUG(" pc : %08x", diag_data.pc); + for (i = 0; i < DMUB_PC_SNAPSHOT_COUNT; i++) + DC_LOG_DEBUG(" pc[%d] : %08x", i, diag_data.pc[i]); DC_LOG_DEBUG(" unk_fault_addr : %08x", diag_data.undefined_address_fault_addr); DC_LOG_DEBUG(" inst_fault_addr : %08x", diag_data.inst_fetch_fault_addr); DC_LOG_DEBUG(" data_fault_addr : %08x", diag_data.data_write_fault_addr); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 7785908a6676..662bdb0e5d3d 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -71,6 +71,8 @@ extern "C" { #endif +#define DMUB_PC_SNAPSHOT_COUNT 10 + /* Forward declarations */ struct dmub_srv; struct dmub_srv_common_regs; @@ -299,6 +301,17 @@ struct dmub_srv_hw_params { enum dmub_ips_disable_type disable_ips; }; +/** + * struct dmub_srv_debug - Debug info for dmub_srv + * @timeout_occured: Indicates a timeout occured on any message from driver to dmub + * @timeout_cmd: first cmd sent from driver that timed out - subsequent timeouts are not stored + */ +struct dmub_srv_debug { + bool timeout_occured; + union dmub_rb_cmd timeout_cmd; + unsigned long long timestamp; +}; + /** * struct dmub_diagnostic_data - Diagnostic data retrieved from DMCUB for * debugging purposes, including logging, crash analysis, etc. @@ -306,7 +319,7 @@ struct dmub_srv_hw_params { struct dmub_diagnostic_data { uint32_t dmcub_version; uint32_t scratch[17]; - uint32_t pc; + uint32_t pc[DMUB_PC_SNAPSHOT_COUNT]; uint32_t undefined_address_fault_addr; uint32_t inst_fetch_fault_addr; uint32_t data_write_fault_addr; @@ -317,6 +330,7 @@ struct dmub_diagnostic_data { uint32_t inbox0_wptr; uint32_t inbox0_size; uint32_t gpint_datain0; + struct dmub_srv_debug timeout_info; uint8_t is_dmcub_enabled : 1; uint8_t is_dmcub_soft_reset : 1; uint8_t is_dmcub_secure_reset : 1; @@ -506,6 +520,7 @@ struct dmub_srv { struct dmub_visual_confirm_color visual_confirm_color; enum dmub_srv_power_state_type power_state; + struct dmub_srv_debug debug; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c index cae96fba6349..e500ca9ae09c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c @@ -472,4 +472,5 @@ void dmub_dcn20_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + diag_data->timeout_info = dmub->debug; } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 2bcf5fb87dd9..662c34e9495c 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -466,6 +466,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); diag_data->is_cw6_enabled = is_cw6_enabled; + diag_data->timeout_info = dmub->debug; } bool dmub_dcn31_should_detect(struct dmub_srv *dmub) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c index 0d521eeda050..e1da270502cc 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c @@ -478,6 +478,8 @@ void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub, struct 
dmub_diagnosti diag_data->is_cw6_enabled = is_cw6_enabled; diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); + + diag_data->timeout_info = dmub->debug; } void dmub_dcn32_configure_dmub_in_system_memory(struct dmub_srv *dmub) { diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 53f359f3fae2..98afaecd3984 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -516,6 +516,7 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti diag_data->is_cw6_enabled = is_cw6_enabled; diag_data->gpint_datain0 = REG_READ(DMCUB_GPINT_DATAIN0); + diag_data->timeout_info = dmub->debug; } void dmub_dcn35_configure_dmub_in_system_memory(struct dmub_srv *dmub) { -- cgit v1.2.3-70-g09d2 From cc263c3a0c9f38ea245393daa07eaa8e21ee17ae Mon Sep 17 00:00:00 2001 From: Joshua Aberback Date: Thu, 7 Mar 2024 05:13:04 -0500 Subject: drm/amd/display: remove context->dml2 dependency from DML21 wrapper [Why] When the DML2 wrapper explicitly accesses context->dml2, that creates a dependency on where dc saves the DML object. This dependency makes it harder to have multiple co-existing DML objects, which we would like to have for upcoming functionality. [How] - make all DML21 interfaces take in a DML2 object as parameter - remove all references to context->dml2, use parameter instead Reviewed-by: Jun Lei Reviewed-by: Aric Cyr Acked-by: Roman Li Signed-off-by: Joshua Aberback Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 6 +++--- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 1 + drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c | 2 +- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c | 2 +- drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 2 +- 5 files changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index e2489eaabb20..abf946f089b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -690,13 +690,13 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate) { bool out = false; - if (!(context->bw_ctx.dml2)) + if (!dml2) return false; - dml2_apply_debug_options(in_dc, context->bw_ctx.dml2); + dml2_apply_debug_options(in_dc, dml2); /* Use dml_validate_only for fast_validate path */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 54aff9beb73a..20fd5b8e5117 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -268,6 +268,7 @@ void dml2_reinit(const struct dc *in_dc, */ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, + struct dml2_context *dml2, bool fast_validate); /* diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 0c8dd71148b4..61fd0f2e69fe 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1799,7 +1799,7 @@ bool dcn32_validate_bandwidth(struct dc *dc, bool out = false; if (dc->debug.using_dml2) - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate); else out = dml1_validate(dc, context, fast_validate); return out; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 479641fedcd4..e0544484e8c0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1734,7 +1734,7 @@ static bool dcn35_validate_bandwidth(struct dc *dc, { bool out = false; - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate); if (fast_validate) return out; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 07ac9f3130b7..690e1ade361e 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1714,7 +1714,7 @@ static bool dcn351_validate_bandwidth(struct dc *dc, { bool out = false; - out = dml2_validate(dc, context, fast_validate); + out = dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate); if (fast_validate) return out; -- cgit v1.2.3-70-g09d2 From e779f4587f6178583930caa87897b1be780d5ac7 Mon Sep 17 00:00:00 2001 From: Joshua Aberback Date: Thu, 7 Mar 2024 05:20:03 -0500 Subject: drm/amd/display: Add handling for DC power mode [Why] Future implementations will require a distinction between AC power and DC power (wall power and battery power, respectively). To accomplish this, adding a power mode parameter to certain dc interfaces, and adding a separate DML2 instance for DC mode validation. Default behaviour unchanged. 
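For illustration only (not part of this patch): a caller that wants validation against battery limits could fill in the new parameter struct introduced below roughly as follows; the surrounding dc and context variables are assumed to exist at the call site.

	/* Hedged sketch using only the types and fields added by this patch. */
	enum dc_status res;
	struct dc_commit_streams_params params = {
		.streams      = context->streams,
		.stream_count = context->stream_count,
		/* request validation against DC (battery) clock limits */
		.power_source = DC_POWER_SOURCE_DC,
	};

	res = dc_commit_streams(dc, &params);

When no params are supplied, as in the dc_state_create(dc, NULL) call sites updated below, the state defaults to DC_POWER_SOURCE_AC, which is how the default behaviour stays unchanged.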
Reviewed-by: Jun Lei Reviewed-by: Aric Cyr Acked-by: Roman Li Signed-off-by: Joshua Aberback Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 17 +++++++--- drivers/gpu/drm/amd/display/dc/core/dc.c | 37 ++++++++++++---------- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 29 +++++++++++++++-- drivers/gpu/drm/amd/display/dc/dc.h | 13 +++++--- drivers/gpu/drm/amd/display/dc/dc_state.h | 2 +- drivers/gpu/drm/amd/display/dc/dc_stream.h | 8 ----- drivers/gpu/drm/amd/display/dc/dc_types.h | 16 ++++++++++ drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 2 ++ drivers/gpu/drm/amd/display/dc/inc/core_types.h | 3 ++ .../amd/display/dc/resource/dcn32/dcn32_resource.c | 16 ++++++++-- .../display/dc/resource/dcn321/dcn321_resource.c | 12 ++++++- .../amd/display/dc/resource/dcn35/dcn35_resource.c | 4 ++- .../display/dc/resource/dcn351/dcn351_resource.c | 4 ++- 13 files changed, 120 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1fe6c8a8d21b..a032506f1588 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2629,6 +2629,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) int i; struct dc_stream_state *del_streams[MAX_PIPES]; int del_streams_count = 0; + struct dc_commit_streams_params params = {}; memset(del_streams, 0, sizeof(del_streams)); @@ -2655,7 +2656,9 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) goto fail; } - res = dc_commit_streams(dc, context->streams, context->stream_count); + params.streams = context->streams; + params.stream_count = context->stream_count; + res = dc_commit_streams(dc, ¶ms); fail: dc_state_release(context); @@ -2877,6 +2880,7 @@ static int dm_resume(void *handle) struct dc_state *dc_state; int i, r, j, ret; bool need_hotplug = false; + struct dc_commit_streams_params commit_params = {}; if (dm->dc->caps.ips_support) { dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false); @@ -2926,7 +2930,9 @@ static int dm_resume(void *handle) dc_enable_dmub_outbox(adev->dm.dc); } - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + commit_params.streams = dc_state->streams; + commit_params.stream_count = dc_state->stream_count; + WARN_ON(!dc_commit_streams(dm->dc, &commit_params)); dm_gpureset_commit_state(dm->cached_dc_state, dm); @@ -2943,7 +2949,7 @@ static int dm_resume(void *handle) } /* Recreate dc_state - DC invalidates it when setting power state to S3. */ dc_state_release(dm_state->context); - dm_state->context = dc_state_create(dm->dc); + dm_state->context = dc_state_create(dm->dc, NULL); /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ /* Before powering on DC we need to re-initialize DMUB. 
*/ @@ -6802,7 +6808,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, if (!dc_plane_state) goto cleanup; - dc_state = dc_state_create(dc); + dc_state = dc_state_create(dc, NULL); if (!dc_state) goto cleanup; @@ -8857,6 +8863,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, struct drm_connector *connector; bool mode_set_reset_required = false; u32 i; + struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; /* Disable writeback */ for_each_old_connector_in_state(state, connector, old_con_state, i) { @@ -8993,7 +9000,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, dm_enable_per_frame_crtc_master_sync(dc_state); mutex_lock(&dm->dc_lock); - WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count)); + WARN_ON(!dc_commit_streams(dm->dc, ¶ms)); /* Allow idle optimization when vblank count is 0 for display off */ if (dm->active_vblank_irq_count == 0) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index c3510cdd0ec8..f44025eacc0a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1089,8 +1089,7 @@ static bool dc_construct(struct dc *dc, * is initialized in dc_create_resource_pool because * on creation it copies the contents of dc->dml */ - - dc->current_state = dc_state_create(dc); + dc->current_state = dc_state_create(dc, NULL); if (!dc->current_state) { dm_error("%s: failed to create validate ctx\n", __func__); @@ -2135,9 +2134,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * Return DC_OK if everything work as expected, otherwise, return a dc_status * code. */ -enum dc_status dc_commit_streams(struct dc *dc, - struct dc_stream_state *streams[], - uint8_t stream_count) +enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params) { int i, j; struct dc_state *context; @@ -2146,18 +2143,22 @@ enum dc_status dc_commit_streams(struct dc *dc, struct pipe_ctx *pipe; bool handle_exit_odm2to1 = false; + if (!params) + return DC_ERROR_UNEXPECTED; + if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW) return res; - if (!streams_changed(dc, streams, stream_count)) + if (!streams_changed(dc, params->streams, params->stream_count) && + dc->current_state->power_source == params->power_source) return res; dc_exit_ips_for_hw_access(dc); - DC_LOG_DC("%s: %d streams\n", __func__, stream_count); + DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count); - for (i = 0; i < stream_count; i++) { - struct dc_stream_state *stream = streams[i]; + for (i = 0; i < params->stream_count; i++) { + struct dc_stream_state *stream = params->streams[i]; struct dc_stream_status *status = dc_stream_get_status(stream); dc_stream_log(dc, stream); @@ -2175,7 +2176,7 @@ enum dc_status dc_commit_streams(struct dc *dc, * scenario, it uses extra pipes than needed to reduce power consumption * We need to switch off this feature to make room for new streams. 
*/ - if (stream_count > dc->current_state->stream_count && + if (params->stream_count > dc->current_state->stream_count && dc->current_state->stream_count == 1) { for (i = 0; i < dc->res_pool->pipe_count; i++) { pipe = &dc->current_state->res_ctx.pipe_ctx[i]; @@ -2191,7 +2192,9 @@ enum dc_status dc_commit_streams(struct dc *dc, if (!context) goto context_alloc_fail; - res = dc_validate_with_context(dc, set, stream_count, context, false); + context->power_source = params->power_source; + + res = dc_validate_with_context(dc, set, params->stream_count, context, false); if (res != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; @@ -2199,16 +2202,16 @@ enum dc_status dc_commit_streams(struct dc *dc, res = dc_commit_state_no_check(dc, context); - for (i = 0; i < stream_count; i++) { + for (i = 0; i < params->stream_count; i++) { for (j = 0; j < context->stream_count; j++) { - if (streams[i]->stream_id == context->streams[j]->stream_id) - streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; + if (params->streams[i]->stream_id == context->streams[j]->stream_id) + params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst; - if (dc_is_embedded_signal(streams[i]->signal)) { - struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]); + if (dc_is_embedded_signal(params->streams[i]->signal)) { + struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]); if (dc->hwss.is_abm_supported) - status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]); + status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]); else status->is_abm_supported = true; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index d546ea71026d..d1d326e9b9b6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -188,8 +188,11 @@ static void init_state(struct dc *dc, struct dc_state *state) } /* Public dc_state functions */ -struct dc_state *dc_state_create(struct dc *dc) +struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { +#ifdef CONFIG_DRM_AMD_DC_FP + struct dml2_configuration_options dml2_opt = dc->dml2_options; +#endif struct dc_state *state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -198,10 +201,16 @@ struct dc_state *dc_state_create(struct dc *dc) init_state(dc, state); dc_state_construct(dc, state); + state->power_source = params ? 
params->power_source : DC_POWER_SOURCE_AC; #ifdef CONFIG_DRM_AMD_DC_FP - if (dc->debug.using_dml2) - dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2); + if (dc->debug.using_dml2) { + dml2_opt.use_clock_dc_limits = false; + dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2); + + dml2_opt.use_clock_dc_limits = true; + dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2_dc_power_source); + } #endif kref_init(&state->refcount); @@ -214,6 +223,7 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state) struct kref refcount = dst_state->refcount; #ifdef CONFIG_DRM_AMD_DC_FP struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2; + struct dml2_context *dst_dml2_dc_power_source = dst_state->bw_ctx.dml2_dc_power_source; #endif dc_state_copy_internal(dst_state, src_state); @@ -222,6 +232,10 @@ void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state) dst_state->bw_ctx.dml2 = dst_dml2; if (src_state->bw_ctx.dml2) dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2); + + dst_state->bw_ctx.dml2_dc_power_source = dst_dml2_dc_power_source; + if (src_state->bw_ctx.dml2_dc_power_source) + dml2_copy(dst_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source); #endif /* context refcount should not be overridden */ @@ -245,6 +259,12 @@ struct dc_state *dc_state_create_copy(struct dc_state *src_state) dc_state_release(new_state); return NULL; } + + if (src_state->bw_ctx.dml2_dc_power_source && + !dml2_create_copy(&new_state->bw_ctx.dml2_dc_power_source, src_state->bw_ctx.dml2_dc_power_source)) { + dc_state_release(new_state); + return NULL; + } #endif kref_init(&new_state->refcount); @@ -326,6 +346,9 @@ static void dc_state_free(struct kref *kref) #ifdef CONFIG_DRM_AMD_DC_FP dml2_destroy(state->bw_ctx.dml2); state->bw_ctx.dml2 = 0; + + dml2_destroy(state->bw_ctx.dml2_dc_power_source); + state->bw_ctx.dml2_dc_power_source = 0; #endif kvfree(state); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index b026004b713a..4d087137de3b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1504,10 +1504,15 @@ bool dc_acquire_release_mpc_3dlut( bool dc_resource_is_dsc_encoding_supported(const struct dc *dc); void get_audio_check(struct audio_info *aud_modes, struct audio_check *aud_chk); - -enum dc_status dc_commit_streams(struct dc *dc, - struct dc_stream_state *streams[], - uint8_t stream_count); +/* + * Set up streams and links associated to drive sinks + * The streams parameter is an absolute set of all active streams. + * + * After this call: + * Phy, Encoder, Timing Generator are programmed and enabled. + * New streams are enabled with blank stream; no memory read. 
+ */ +enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params); struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h index a8979c9defe4..caa45db50232 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_state.h +++ b/drivers/gpu/drm/amd/display/dc/dc_state.h @@ -29,7 +29,7 @@ #include "dc.h" #include "inc/core_status.h" -struct dc_state *dc_state_create(struct dc *dc); +struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params); void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state); struct dc_state *dc_state_create_copy(struct dc_state *src_state); void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state); diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 68dc668f3e14..e5dbbc6089a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -427,14 +427,6 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc, enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); -/* - * Set up streams and links associated to drive sinks - * The streams parameter is an absolute set of all active streams. - * - * After this call: - * Phy, Encoder, Timing Generator are programmed and enabled. - * New streams are enabled with blank stream; no memory read. - */ /* * Enable stereo when commit_streams is not required, * for example, frame alternate. diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index e219a1812360..614d7c27c759 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1175,4 +1175,20 @@ enum mall_stream_type { SUBVP_MAIN, // subvp in use, this stream is main stream SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream }; + +enum dc_power_source_type { + DC_POWER_SOURCE_AC, // wall power + DC_POWER_SOURCE_DC, // battery power +}; + +struct dc_state_create_params { + enum dc_power_source_type power_source; +}; + +struct dc_commit_streams_params { + struct dc_stream_state **streams; + uint8_t stream_count; + enum dc_power_source_type power_source; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 20fd5b8e5117..3760d67af7d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -215,6 +215,8 @@ struct dml2_configuration_options { unsigned int max_segments_per_hubp; unsigned int det_segment_size; bool map_dc_pipes_with_callbacks; + + bool use_clock_dc_limits; }; /* diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 3f6876aafee0..3fab7c5bf093 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -518,6 +518,7 @@ struct bw_context { union bw_output bw; struct display_mode_lib dml; struct dml2_context *dml2; + struct dml2_context *dml2_dc_power_source; }; struct dc_dmub_cmd { @@ -606,6 +607,8 @@ struct dc_state { struct { unsigned int stutter_period_us; } perf_params; + + enum dc_power_source_type power_source; }; struct replay_context { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c 
index 61fd0f2e69fe..e2bff9b9d55a 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1799,7 +1799,9 @@ bool dcn32_validate_bandwidth(struct dc *dc, bool out = false; if (dc->debug.using_dml2) - out = dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); else out = dml1_validate(dc, context, fast_validate); return out; @@ -1997,10 +1999,20 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + struct dml2_configuration_options dml2_opt = dc->dml2_options; + DC_FP_START(); + dcn32_update_bw_bounding_box_fpu(dc, bw_params); + + dml2_opt.use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2); + + dml2_opt.use_clock_dc_limits = true; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index f7339b141939..3816678b044f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1579,10 +1579,20 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + struct dml2_configuration_options dml2_opt = dc->dml2_options; + DC_FP_START(); + dcn321_update_bw_bounding_box_fpu(dc, bw_params); + + dml2_opt.use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2); + + dml2_opt.use_clock_dc_limits = true; + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) + dml2_reinit(dc, &dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index e0544484e8c0..25ac450944e7 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1734,7 +1734,9 @@ static bool dcn35_validate_bandwidth(struct dc *dc, { bool out = false; - out = dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? 
context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); if (fast_validate) return out; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 690e1ade361e..66ab6aba26aa 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1714,7 +1714,9 @@ static bool dcn351_validate_bandwidth(struct dc *dc, { bool out = false; - out = dml2_validate(dc, context, context->bw_ctx.dml2, fast_validate); + out = dml2_validate(dc, context, + context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, + fast_validate); if (fast_validate) return out; -- cgit v1.2.3-70-g09d2 From cc5209647f213ffabca1528da2f88a589059ac0e Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Tue, 19 Mar 2024 14:24:51 -0400 Subject: drm/amd/display: move build test pattern params as part of pipe resource update for odm [why] Move built test pattern as part of pipe resource update for odm to ensure we rebuild test pattern params every time we have an ODM update Reviewed-by: George Shen Acked-by: Roman Li Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 24 ++++++++++++++-------- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7 +++++++ .../gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 8 ++++++++ .../amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 1 + 5 files changed, 37 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index f44025eacc0a..5a93278fa246 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3235,14 +3235,6 @@ static bool update_planes_and_stream_state(struct dc *dc, BREAK_TO_DEBUGGER(); goto fail; } - - for (i = 0; i < context->stream_count; i++) { - struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, - context->streams[i]); - - if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) - resource_build_test_pattern_params(&context->res_ctx, otg_master); - } } update_seamless_boot_flags(dc, context, surface_count, stream); @@ -3338,12 +3330,26 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->pending_test_pattern) { - dc_link_dp_set_test_pattern(stream->link, + /* + * test pattern params depends on ODM topology + * changes that we could be applying to front + * end. Since at the current stage front end + * changes are not yet applied. We can only + * apply test pattern in hw based on current + * state and populate the final test pattern + * params in new state. If current and new test + * pattern params are different as result of + * different ODM topology being used, it will be + * detected and handle during front end + * programming update. 
+ */ + dc->link_srv->dp_set_test_pattern(stream->link, stream->test_pattern.type, stream->test_pattern.color_space, stream->test_pattern.p_link_settings, stream->test_pattern.p_custom_pattern, stream->test_pattern.cust_pattern_size); + resource_build_test_pattern_params(&context->res_ctx, pipe_ctx); } if (stream_update->dpms_off) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c4a3484554b0..d48a181d2249 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1457,6 +1457,9 @@ void resource_build_test_pattern_params(struct resource_context *res_ctx, controller_color_space = convert_dp_to_controller_color_space( otg_master->stream->test_pattern.color_space); + if (controller_test_pattern == CONTROLLER_DP_TEST_PATTERN_VIDEOMODE) + return; + odm_cnt = resource_get_opp_heads_for_otg_master(otg_master, res_ctx, opp_heads); odm_slice_width = h_active / odm_cnt; @@ -2325,6 +2328,9 @@ static bool update_pipe_params_after_odm_slice_count_change( if (pool->funcs->build_pipe_pix_clk_params) pool->funcs->build_pipe_pix_clk_params(otg_master); + + if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) + resource_build_test_pattern_params(&context->res_ctx, otg_master); return result; } @@ -5082,6 +5088,7 @@ void resource_init_common_dml2_callbacks(struct dc *dc, struct dml2_configuratio { dml2_options->callbacks.dc = dc; dml2_options->callbacks.build_scaling_params = &resource_build_scaling_params; + dml2_options->callbacks.build_test_pattern_params = &resource_build_test_pattern_params; dml2_options->callbacks.acquire_secondary_pipe_for_mpc_odm = &dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy; dml2_options->callbacks.update_pipes_for_stream_with_slice_count = &resource_update_pipes_for_stream_with_slice_count; dml2_options->callbacks.update_pipes_for_plane_with_slice_count = &resource_update_pipes_for_plane_with_slice_count; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 8c0e1ab29aa9..9e6498d2439d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2115,6 +2115,14 @@ static bool dcn32_apply_merge_split_flags_helper( return false; } } + + for (i = 0; i < context->stream_count; i++) { + struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, + context->streams[i]); + + if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) + resource_build_test_pattern_params(&context->res_ctx, otg_master); + } } return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c index 27d9da8ad7c1..237b67de62c1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c @@ -1078,6 +1078,12 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s ASSERT(false); } } + + if (ctx->config.callbacks.build_test_pattern_params && + pipe->stream && + pipe->prev_odm_pipe == NULL && + pipe->top_pipe == NULL) + ctx->config.callbacks.build_test_pattern_params(&state->res_ctx, pipe); } return true; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 3760d67af7d5..4a8bd2f4195e 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -71,6 +71,7 @@ struct dml2_dcn_clocks { struct dml2_dc_callbacks { struct dc *dc; bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx); + void (*build_test_pattern_params)(struct resource_context *res_ctx, struct pipe_ctx *otg_master); bool (*can_support_mclk_switch_using_fw_based_vblank_stretch)(struct dc *dc, struct dc_state *context); bool (*acquire_secondary_pipe_for_mpc_odm)(const struct dc *dc, struct dc_state *state, struct pipe_ctx *pri_pipe, struct pipe_ctx *sec_pipe, bool odm); bool (*update_pipes_for_stream_with_slice_count)( -- cgit v1.2.3-70-g09d2 From aece2094e34e602f37403dda028f464bb41da50c Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Tue, 12 Mar 2024 14:40:06 -0400 Subject: drm/amd/display: Fix compiler warnings on high compiler warning levels [why] Enabling higher compiler warning levels results in many issues that can be trivially resolved as well as some potentially critical issues. [how] Fix all compiler warnings found with various compilers and higher warning levels. Primarily, potentially uninitialized variables and unreachable code. Reviewed-by: Leo Li Acked-by: Roman Li Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 -- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 3 ++- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 3 ++- .../gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 3 ++- .../gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 3 ++- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 2 +- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 2 +- drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 1 - .../gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 2 +- .../gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 1 - drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 2 -- .../gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 1 - .../gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c | 1 - drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c | 2 +- .../drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c | 1 - .../amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 14 ++++++-------- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 2 +- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 2 +- drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c | 12 +++++------- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 2 +- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 8 ++++---- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 2 +- .../gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c | 2 +- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 2 +- .../amd/display/dc/link/protocols/link_dp_training_dpia.c | 4 ++-- .../drm/amd/display/dc/resource/dce112/dce112_resource.c | 2 -- .../drm/amd/display/dc/resource/dce120/dce120_resource.c | 2 +- .../gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c | 2 +- .../gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c | 2 +- .../drm/amd/display/dc/resource/dcn316/dcn316_resource.c | 2 +- drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 2 +- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 2 +- 35 files changed, 44 insertions(+), 55 deletions(-) 
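A minimal standalone sketch of the two warning classes named in the commit message above, potentially uninitialized locals and unreachable code after an exhaustive switch. This is illustrative only, not part of the patch, and all names in it are hypothetical rather than driver code:

#include <stdio.h>

enum mode { MODE_A, MODE_B, MODE_COUNT };

static int mode_to_id(enum mode m)
{
	switch (m) {
	case MODE_A:
		return 1;
	case MODE_B:
		return 2;
	default:
		return 0;
	}
	/* A trailing "return 0;" here would never execute; the hunks below
	 * remove such statements (e.g. in dcn10_get_dig_mode and
	 * find_matching_pll). */
}

int main(void)
{
	enum mode m = MODE_B;
	int id = 0;	/* initialized at declaration, mirroring the
			 * "struct pipe_ctx *pipe = 0;" style fixes below */

	if (m != MODE_COUNT)
		id = mode_to_id(m);

	/* Without the "= 0" initializer, high warning levels may report that
	 * "id" can be used uninitialized at this point. */
	printf("id = %d\n", id);
	return 0;
}

The hunks that follow mostly take the first approach, an initializer at the declaration, since it keeps each change to a single line.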
(limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 05f392501c0a..517c976dbc19 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1594,8 +1594,6 @@ static bool bios_parser_is_device_id_supported( return (le16_to_cpu(bp->object_info_tbl.v1_5->supporteddevices) & mask) != 0; break; } - - return false; } static uint32_t bios_parser_get_ss_entry_number( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index f65bb4c21b7d..5ef0879f6ad9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -642,7 +642,8 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params j = -1; - ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceed maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index aa9fd1dc550a..191d8b969d19 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -566,7 +566,8 @@ static void vg_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(VG_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported FCLK DPM levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index ce1386e22576..12a7752758b8 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -562,7 +562,8 @@ static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 12f3e8aa46d8..e4ed888f8403 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -480,7 +480,8 @@ static void dcn316_clk_mgr_helper_populate_bw_params( j = -1; - ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL); + static_assert(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL, + "number of reported pstate levels exceeds maximum"); /* Find lowest DPM, FCLK is filled in reverse order*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 3b1b324c0824..df0f23afc8bb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -558,7 +558,7 @@ void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], - int *num_steps, + unsigned int *num_steps, struct pipe_ctx *pipe_ctx, 
struct dc_stream_status *stream_status) { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 34d199b08dd9..4c98cd066b8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1316,7 +1316,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; - uint32_t rcg_exit_count, ips1_exit_count, ips2_exit_count; + uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; if (dc->debug.dmcub_emulation) return; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 670d5ab9d998..2b1673d69ea8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -1408,7 +1408,7 @@ void dce110_opp_set_csc_default( static void program_pwl(struct dce_transform *xfm_dce, const struct pwl_params *params) { - int retval; + uint32_t retval; uint8_t max_tries = 10; uint8_t counter = 0; uint32_t i = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 377f1ba1a81b..4d0eed7598b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -1439,7 +1439,6 @@ enum signal_type dcn10_get_dig_mode( default: return SIGNAL_TYPE_NONE; } - return SIGNAL_TYPE_NONE; } void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index c831f6c69a17..fbcd6f7bc993 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -382,7 +382,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting; for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index 6e6ae3de08e4..87eab924ecaf 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -80,7 +80,6 @@ enum signal_type dcn35_get_dig_mode( default: return SIGNAL_TYPE_NONE; } - return SIGNAL_TYPE_NONE; } void dcn35_link_encoder_setup( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index 9c93a9f1eb0c..25d46c69464f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1102,8 +1102,6 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc } else { return DCN_ZSTATE_SUPPORT_DISALLOW; } - - return DCN_ZSTATE_SUPPORT_DISALLOW; } static void dcn20_adjust_freesync_v_startup( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index e7f4a2d491cc..e0b52db2c210 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -3535,7 +3535,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index adea459e7d36..33cf824c5da1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -3679,7 +3679,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } static noinline void CalculatePrefetchSchedulePerPlane( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index fb21572750e8..21f637ae4add 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -310,7 +310,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c index 88e56889a68c..3242957d00c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c @@ -3788,7 +3788,6 @@ static double TruncToValidBPP( return DesiredBPP; } } - return BPP_INVALID; } static noinline void CalculatePrefetchSchedulePerPlane( diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index 80fccd4999a5..76399c66bc3b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1650,6 +1650,8 @@ double dml32_TruncToValidBPP( MaxLinkBPP = 2 * MaxLinkBPP; } + *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1); + if (DesiredBPP == 0) { if (DSCEnable) { if (MaxLinkBPP < MinDSCBPP) @@ -1676,10 +1678,6 @@ double dml32_TruncToValidBPP( else return DesiredBPP; } - - *RequiredSlots = dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1); - - return BPP_INVALID; } // TruncToValidBPP double dml32_RequiredDTBCLK( @@ -4291,7 +4289,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( unsigned int i, j, k; unsigned int SurfaceWithMinActiveFCLKChangeMargin = 0; unsigned int DRAMClockChangeSupportNumber = 0; - unsigned int LastSurfaceWithoutMargin; + unsigned int LastSurfaceWithoutMargin = 0; unsigned int DRAMClockChangeMethod = 0; bool FoundFirstSurfaceWithMinActiveFCLKChangeMargin = false; double MinActiveFCLKChangeMargin = 0.; @@ -5656,9 +5654,9 @@ void dml32_CalculateStutterEfficiency( double LastZ8StutterPeriod = 0.0; double LastStutterPeriod = 0.0; unsigned int TotalNumberOfActiveOTG = 0; - double doublePixelClock; - unsigned int doubleHTotal; - unsigned int doubleVTotal; + double doublePixelClock = 0; + unsigned int doubleHTotal = 0; + unsigned int doubleVTotal = 0; bool SameTiming = true; double DETBufferingTimeY; double SwathWidthYCriticalSurface = 0.0; diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 714c2fe03c5f..60f251cf973b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -439,7 +439,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index b3ffab77cf88..5abf120ff075 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -474,7 +474,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; const unsigned int max_allowed_vblank_nom = 1023; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 9184772d2e38..470dca3950c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -2784,6 +2784,8 @@ static dml_float_t TruncToValidBPP( } } + *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1)); + if (DesiredBPP == 0) { if (DSCEnable) { if (MaxLinkBPP < MinDSCBPP) { @@ -2812,10 +2814,6 @@ static dml_float_t TruncToValidBPP( return DesiredBPP; } } - - *RequiredSlots = (dml_uint_t)(dml_ceil(DesiredBPP / MaxLinkBPP * 64, 1)); - - return __DML_DPP_INVALID__; } // TruncToValidBPP static void CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( @@ -3792,9 +3790,9 @@ static void CalculateStutterEfficiency(struct display_mode_lib_scratch_st *scrat dml_bool_t FoundCriticalSurface = false; dml_uint_t TotalNumberOfActiveOTG = 0; - dml_float_t SinglePixelClock; - dml_uint_t SingleHTotal; - dml_uint_t SingleVTotal; + dml_float_t SinglePixelClock = 0; + dml_uint_t SingleHTotal = 0; + dml_uint_t SingleVTotal = 0; dml_bool_t SameTiming = true; dml_float_t LastStutterPeriod = 0.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index bedf1cd390df..81b4e08f8098 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -224,7 +224,7 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane, unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id) { - int i, j; + unsigned int i, j; bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists; if (!plane_id) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index ac41f9c0a283..a15b21edb0cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -458,7 +458,7 @@ bool dc_dsc_compute_bandwidth_range( bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; - struct dc_dsc_config config; + struct dc_dsc_config config = {0}; struct dc_dsc_config_options options = {0}; options.dsc_min_slice_height_override 
= dsc_min_slice_height_override; @@ -868,9 +868,9 @@ static bool setup_dsc_config( struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; - int max_slices_h; - int min_slices_h; - int num_slices_h; + int max_slices_h = 0; + int min_slices_h = 0; + int num_slices_h = 0; int pic_width; int slice_width; int target_bpp; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index 17b404cb1155..5920d1825a4c 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -2188,7 +2188,7 @@ static void dce110_setup_audio_dto( struct dc *dc, struct dc_state *context) { - int i; + unsigned int i; /* program audio wall clock. use HDMI as clock source if HDMI * audio active. Otherwise, use DP as clock source diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c index 774605a029e8..40391dd16944 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c @@ -885,7 +885,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable) { union dmub_rb_cmd cmd; uint32_t tmr_delay = 0, tmr_scale = 0; - struct dc_cursor_attributes cursor_attr; + struct dc_cursor_attributes cursor_attr = {0}; bool cursor_cache_enable = false; struct dc_stream_state *stream = NULL; struct dc_plane_state *plane = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 2368fad61b41..659ce11ad446 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -480,7 +480,7 @@ void hwss_build_fast_sequence(struct dc *dc, struct dc_dmub_cmd *dc_dmub_cmd, unsigned int dmub_cmd_count, struct block_sequence block_sequence[], - int *num_steps, + unsigned int *num_steps, struct pipe_ctx *pipe_ctx, struct dc_stream_status *stream_status); diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 22b24749c9d2..8d1a1cc94a8b 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -884,7 +884,7 @@ void dp_set_preferred_link_settings(struct dc *dc, { int i; struct pipe_ctx *pipe; - struct dc_stream_state *link_stream; + struct dc_stream_state *link_stream = 0; struct dc_link_settings store_settings = *link_setting; link->preferred_link_setting = store_settings; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 9c0e2b9cffc9..ce68476e69d5 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -127,7 +127,7 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) if (link->ep_type == DISPLAY_ENDPOINT_PHY && link->link_enc->funcs->get_dig_frontend && link->link_enc->funcs->is_dig_enabled(link->link_enc)) { - unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); if (fe != ENGINE_ID_UNKNOWN) for (j = 0; j < dc->res_pool->stream_enc_count; j++) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c index 
5d36bab0029c..edb21d21952a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -291,7 +291,7 @@ static enum link_training_result dpia_training_cr_non_transparent( { enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ uint32_t retry_count = 0; uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */ @@ -617,7 +617,7 @@ static enum link_training_result dpia_training_eq_non_transparent( enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ uint32_t retries_eq = 0; - enum dc_status status; + enum dc_status status = DC_ERROR_UNEXPECTED; enum dc_dp_training_pattern tr_pattern; uint32_t wait_time_microsec = 0; enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index d1edac46c9a0..88afb2a30eef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -864,8 +864,6 @@ static struct clock_source *find_matching_pll( default: return NULL; } - - return NULL; } static enum dc_status build_mapped_resource( diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c index 20662edd0ae4..621825a51f46 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c @@ -1060,7 +1060,7 @@ static bool dce120_resource_construct( struct irq_service_init_data irq_init_data; static const struct resource_create_funcs *res_funcs; bool is_vg20 = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); - uint32_t pipe_fuses; + uint32_t pipe_fuses = 0; ctx->dc_bios->regs = &bios_regs; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index ecc477ef8e3b..237e00ea3e94 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -1639,7 +1639,7 @@ noinline bool dcn30_internal_validate_bw( int split[MAX_PIPES] = { 0 }; bool merge[MAX_PIPES] = { false }; bool newly_split[MAX_PIPES] = { false }; - int pipe_cnt, i, pipe_idx, vlevel; + int pipe_cnt, i, pipe_idx, vlevel = 0; struct vba_vars_st *vba = &context->bw_ctx.dml.vba; ASSERT(pipes); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index ce3699f62602..ecec3b69bb88 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1644,7 +1644,7 @@ int dcn31_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; bool upscaled = false; DC_FP_START(); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c 
b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 83a71f1b933d..e808231e8478 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -1613,7 +1613,7 @@ static int dcn316_populate_dml_pipes_from_context( { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; - struct pipe_ctx *pipe; + struct pipe_ctx *pipe = 0; const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; DC_FP_START(); diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 8b5c27857671..3699e633801d 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1059,7 +1059,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, struct fixed31_32 min_display; struct fixed31_32 max_content; struct fixed31_32 clip = dc_fixpt_one; - struct fixed31_32 output; + struct fixed31_32 output = dc_fixpt_zero; bool use_eetf = false; bool is_clipped = false; struct fixed31_32 sdr_white_level; diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 733f22bed021..c996365e84b0 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -151,7 +151,7 @@ out: static enum mod_hdcp_status poll_l_prime_available(struct mod_hdcp *hdcp) { - enum mod_hdcp_status status; + enum mod_hdcp_status status = MOD_HDCP_STATUS_FAILURE; uint8_t size; uint16_t max_wait = 20; // units of ms uint16_t num_polls = 5; -- cgit v1.2.3-70-g09d2 From 14813934b6290ec79c063923cb4ed131e10b402f Mon Sep 17 00:00:00 2001 From: Roman Li Date: Fri, 22 Mar 2024 14:32:19 -0400 Subject: drm/amd/display: Allow RCG for Static Screen + LVP for DCN35 [Why] We want to block IPS2 for static screen but allow it for power state transitions. [How] Set DalDisableIPS=6 for DCN35 which allows: 1. RCG during static screen 2. RCG during LVP 3. 
IPS2 for display off / S0i3 Reviewed-by: Nicholas Kazlauskas Acked-by: Roman Li Signed-off-by: Nicholas Kazlauskas Signed-off-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a032506f1588..e80a75312cf7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1726,8 +1726,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_IPS) init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL; + else + init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF; - init_data.flags.disable_ips_in_vpb = 1; + init_data.flags.disable_ips_in_vpb = 0; /* Enable DWB for tested platforms only */ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) -- cgit v1.2.3-70-g09d2 From 7315a4fb1532d748505076b543ac1461f8d20700 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 24 Mar 2024 20:17:59 -0400 Subject: drm/amd/display: 3.2.279 This version pairs with DMUB FW Release 0.0.211.0 for dcn314, dcn35, dcn351 and brings along the following: - Fix underflow in subvp/non-subvp configs - Fix compiler warnings - Add handling for DC power mode - Add extra logging for DMUB, HUBP and OTG - Add timing pixel encoding for mst mode validation - Expand supported Replay residency mode - Allow HPO PG and RCG for DCN35 - Update pipe topology log to support subvp - Disable Z8 minimum stutter period check for DCN35 - Enable RCO for HDMISTREAMCLK in DCN35 Acked-by: Roman Li Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4d087137de3b..46be4845ec28 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -51,7 +51,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.278" +#define DC_VER "3.2.279" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From a96e384cbbc21bf537e4ae75c5306a559ede5a5e Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 21 Mar 2024 14:30:45 -0600 Subject: drm/amd/display: Initialize DP ref clk with the correct clock Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index b77804cfde0f..2a5dd3a296b2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -131,8 +131,8 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); int dprefclk_wdivider; int dprefclk_src_sel; - int dp_ref_clk_khz; - int target_div = 600000; + int dp_ref_clk_khz = 600000; + int target_div; /* ASSERT DP Reference Clock source is from DFS*/ REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel); -- cgit v1.2.3-70-g09d2 From 916635b98d4edd7bbafdde56c66c65f9175e28ff 
Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 21 Mar 2024 15:56:37 -0600 Subject: drm/amd/display: Set alpha enable to 0 for some specific formats Set alpha_en to 0 in some specific color formats. Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index 58095ac75506..f8c0cee34080 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -293,9 +293,11 @@ void dpp3_cnv_setup ( break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX: pixel_format = 112; + alpha_en = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX: pixel_format = 113; + alpha_en = 0; break; case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010: pixel_format = 114; @@ -319,9 +321,11 @@ void dpp3_cnv_setup ( break; case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT: pixel_format = 118; + alpha_en = 0; break; case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT: pixel_format = 119; + alpha_en = 0; break; default: break; -- cgit v1.2.3-70-g09d2 From 002001b092dd662ab79fcedcdd96c037cf0213d6 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 21 Mar 2024 15:57:49 -0600 Subject: drm/amd/display: Enable cur_rom_en even if cursor degamma is not enabled Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index f8c0cee34080..ed1e2f65f5b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -395,9 +395,7 @@ void dpp3_set_cursor_attributes( if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { - cur_rom_en = 1; - } + cur_rom_en = 1; } REG_UPDATE_3(CURSOR0_CONTROL, -- cgit v1.2.3-70-g09d2 From e7927b2914d40cff609b43a1cf9077e6737a2e27 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 21 Mar 2024 15:51:44 -0600 Subject: drm/amd/display: Add some missing debug registers Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h | 4 ++++ .../amd/include/asic_reg/dcn/dcn_3_0_0_offset.h | 24 ++++++++++++++++++++++ .../amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h | 9 ++++++++ .../amd/include/asic_reg/dcn/dcn_3_0_2_offset.h | 20 ++++++++++++++++++ .../amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h | 8 ++++++++ .../amd/include/asic_reg/dcn/dcn_3_0_3_offset.h | 8 ++++++++ .../amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h | 7 +++++++ .../amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h | 5 +++++ 8 files changed, 85 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h index 2ac8045a87a1..269f437c1633 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h @@ -132,6 +132,8 @@ SRI(CM_POST_CSC_B_C33_C34, CM, id), \ SRI(CM_MEM_PWR_CTRL, CM, id), \ 
SRI(CM_CONTROL, CM, id), \ + SRI(CM_TEST_DEBUG_INDEX, CM, id), \ + SRI(CM_TEST_DEBUG_DATA, CM, id), \ SRI(FORMAT_CONTROL, CNVC_CFG, id), \ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ @@ -294,6 +296,7 @@ TF_SF(CM0_CM_POST_CSC_C11_C12, CM_POST_CSC_C12, mask_sh), \ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C33, mask_sh), \ TF_SF(CM0_CM_POST_CSC_C33_C34, CM_POST_CSC_C34, mask_sh), \ + TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ @@ -426,6 +429,7 @@ type CM_GAMCOR_LUT_DATA; \ type CM_GAMCOR_LUT_WRITE_COLOR_MASK; \ type CM_GAMCOR_LUT_READ_COLOR_SEL; \ + type CM_GAMCOR_LUT_READ_DBG; \ type CM_GAMCOR_LUT_HOST_SEL; \ type CM_GAMCOR_LUT_CONFIG_MODE; \ type CM_GAMCOR_LUT_STATUS; \ diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h index f2f8f9b39c6b..b5bfaa64a9db 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h @@ -4513,6 +4513,10 @@ #define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18 #define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19 +#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5201,6 +5205,10 @@ #define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83 #define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5888,6 +5896,10 @@ #define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee #define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef +#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0 +#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -6576,6 +6588,10 @@ #define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259 #define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a +#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_DATA 0x125b +#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -7264,6 +7280,10 @@ #define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4 #define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5 +#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6 +#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -7952,6 +7972,10 @@ #define mmCM5_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM5_CM_3DLUT_OUT_OFFSET_B 0x152f #define mmCM5_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM5_CM_TEST_DEBUG_INDEX 0x1530 +#define mmCM5_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM5_CM_TEST_DEBUG_DATA 0x1531 +#define 
mmCM5_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp5_dispdec_dpp_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h index e0a447351623..a2ab7f9efc83 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h @@ -16739,6 +16739,15 @@ #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_DATA +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL + // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec //DC_PERFMON12_PERFCOUNTER_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h index b45a35aae241..bf84f97d9162 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h @@ -4466,6 +4466,10 @@ #define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18 #define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19 +#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5154,6 +5158,10 @@ #define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83 #define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -5841,6 +5849,10 @@ #define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10ee #define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_INDEX 0x10ef +#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_DATA 0x10f0 +#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -6529,6 +6541,10 @@ #define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1259 #define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_INDEX 0x125a +#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_DATA 0x125b +#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -7217,6 +7233,10 @@ #define mmCM4_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM4_CM_3DLUT_OUT_OFFSET_B 0x13c4 #define mmCM4_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_INDEX 0x13c5 +#define mmCM4_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM4_CM_TEST_DEBUG_DATA 0x13c6 +#define mmCM4_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp4_dispdec_dpp_dcperfmon_dc_perfmon_dispdec 
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h index 3dae29f9581e..56cdb219874a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h @@ -15676,6 +15676,14 @@ #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10 #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL #define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_DATA +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h index daa8130636f0..e90f31bbb4d2 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h @@ -3110,6 +3110,10 @@ #define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0e18 #define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0e19 +#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0e1a +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -3798,6 +3802,10 @@ #define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2 #define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f83 #define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f84 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0f85 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h index 5c469cf635e5..3be176174b20 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h @@ -10701,6 +10701,13 @@ #define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L + +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L + //CM0_CM_SHAPER_CONTROL #define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0 #define CM0_CM_SHAPER_CONTROL__CM_SHAPER_MODE_CURRENT__SHIFT 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h index 0691e328d0f0..d70518307a34 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h @@ -11544,6 +11544,11 @@ #define 
CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L #define CM0_CM_COEF_FORMAT__CM_POST_CSC_COEF_FORMAT_MASK 0x00000010L #define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L // addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec -- cgit v1.2.3-70-g09d2 From ffa6d7ab1b4acae9cab8adfd0ad340c74a8589c5 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 21 Mar 2024 18:20:38 -0600 Subject: drm/amd/display: Update DSC compute parameter calculation Adjust bytes per pixel calculation to use div_u64. Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 36d6c1646a51..59864130cf83 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -101,7 +101,6 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, { int ret; struct drm_dsc_config dsc_cfg; - unsigned long long tmp; dsc_params->pps = *pps; dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset); @@ -112,9 +111,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64; ret = drm_dsc_compute_rc_parameters(&dsc_cfg); - tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1); - do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP - dsc_params->bytes_per_pixel = (uint32_t)tmp; + dsc_params->bytes_per_pixel = + (uint32_t)(div_u64(((uint64_t)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1)), + (uint32_t)dsc_cfg.slice_width)); /* Round-up */ copy_pps_fields(&dsc_params->pps, &dsc_cfg); dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits; -- cgit v1.2.3-70-g09d2 From 2a2f97e5f46cdb949f967bea7b2c9e514692a392 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 21 Mar 2024 18:23:42 -0600 Subject: drm/amd/display: Drop legacy code Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 10 ---------- .../gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c | 13 ------------- 2 files changed, 23 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 1cb7765f593a..b7ad64e7b62a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -917,16 +917,6 @@ struct dpcd_usb4_dp_tunneling_info { uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; -#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT -#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3 -#endif -#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0 -#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230 -#endif -#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250 -#endif - union dp_main_line_channel_coding_cap { struct { uint8_t DP_8b_10b_SUPPORTED :1; diff --git 
a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c index d734e3a134d1..2840ed5c57d8 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c @@ -95,10 +95,6 @@ static bool offset_to_id( return true; default: ASSERT_CRITICAL(false); -#ifdef PALLADIUM_SUPPORTED - *en = GPIO_DDC_LINE_DDC1; - return true; -#endif return false; } break; @@ -184,11 +180,6 @@ static bool offset_to_id( /* UNEXPECTED */ default: /* case REG(DC_GPIO_SYNCA_A): not exista */ -#ifdef PALLADIUM_SUPPORTED - *id = GPIO_ID_HPD; - *en = GPIO_DDC_LINE_DDC1; - return true; -#endif ASSERT_CRITICAL(false); return false; } @@ -308,10 +299,6 @@ static bool id_to_offset( break; default: ASSERT_CRITICAL(false); -#ifdef PALLADIUM_SUPPORTED - info->mask = DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK; - result = true; -#endif result = false; } break; -- cgit v1.2.3-70-g09d2 From be239684b18e1cdcafcf8c7face4a2f562c745ad Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 13:56:26 -0600 Subject: drm/amd/display: Add missing registers Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- .../amd/include/asic_reg/dcn/dcn_3_0_3_offset.h | 20 ++++++++ .../amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h | 11 ++++ .../amd/include/asic_reg/dcn/dcn_3_1_2_offset.h | 4 ++ .../amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h | 19 +++++++ .../amd/include/asic_reg/dcn/dcn_3_2_0_offset.h | 60 ++++++++++++++++++++++ .../amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h | 22 ++++++++ .../amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h | 4 +- .../amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h | 10 ++++ 8 files changed, 149 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h index e90f31bbb4d2..8b0d2638a6b0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h @@ -5695,6 +5695,16 @@ #define mmDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035 #define mmDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a +#define mmDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b +#define mmDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c +#define mmDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d +#define mmDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define mmDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e +#define mmDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec @@ -5825,6 +5835,16 @@ #define mmDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091 #define mmDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096 +#define mmDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099 +#define 
mmDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define mmDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a +#define mmDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dce_dc_dsc1_dispdec_dsc_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h index 3be176174b20..53f1705f8d99 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h @@ -22265,7 +22265,9 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L // addressBlock: dce_dc_dsc0_dispdec_dsccif_dispdec @@ -22638,6 +22640,15 @@ //DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL +//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L // addressBlock: dce_dc_dsc0_dispdec_dsc_dcperfmon_dc_perfmon_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h index f268d33c4744..7fd906f10803 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h @@ -424,6 +424,8 @@ #define regDTBCLK_DTO2_MODULO_BASE_IDX 2 #define regDTBCLK_DTO3_MODULO 0x0022 #define regDTBCLK_DTO3_MODULO_BASE_IDX 2 +#define regHDMICHARCLK0_CLOCK_CNTL 0x004a +#define regHDMICHARCLK0_CLOCK_CNTL_BASE_IDX 2 #define regPHYASYMCLK_CLOCK_CNTL 0x0052 #define regPHYASYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regPHYBSYMCLK_CLOCK_CNTL 0x0053 @@ -434,6 +436,8 @@ #define regPHYDSYMCLK_CLOCK_CNTL_BASE_IDX 2 #define regPHYESYMCLK_CLOCK_CNTL 0x0056 #define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2 +#define regHDMISTREAMCLK_CNTL 0x0059 +#define regHDMISTREAMCLK_CNTL_BASE_IDX 2 #define regDCCG_GATE_DISABLE_CNTL3 0x005a #define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2 #define regHDMISTREAMCLK0_DTO_PARAM 0x005b diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h index cf3398f15666..07fbfafe6056 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h @@ -1372,6 +1372,11 @@ //DTBCLK_DTO3_MODULO #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO__SHIFT 0x0 #define DTBCLK_DTO3_MODULO__DTBCLK_DTO3_MODULO_MASK 
0xFFFFFFFFL +//HDMICHARCLK0_CLOCK_CNTL +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN__SHIFT 0x0 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL__SHIFT 0x4 +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_EN_MASK 0x00000001L +#define HDMICHARCLK0_CLOCK_CNTL__HDMICHARCLK0_SRC_SEL_MASK 0x00000070L //PHYASYMCLK_CLOCK_CNTL #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_EN__SHIFT 0x0 #define PHYASYMCLK_CLOCK_CNTL__PHYASYMCLK_FORCE_SRC_SEL__SHIFT 0x4 @@ -1397,6 +1402,13 @@ #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL__SHIFT 0x4 #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_EN_MASK 0x00000001L #define PHYESYMCLK_CLOCK_CNTL__PHYESYMCLK_FORCE_SRC_SEL_MASK 0x00000030L +//HDMISTREAMCLK_CNTL +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL__SHIFT 0x0 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN__SHIFT 0x3 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS__SHIFT 0x4 +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_SRC_SEL_MASK 0x00000007L +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_EN_MASK 0x00000008L +#define HDMISTREAMCLK_CNTL__HDMISTREAMCLK0_DTO_FORCE_DIS_MASK 0x00000010L //DCCG_GATE_DISABLE_CNTL3 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK0_GATE_DISABLE__SHIFT 0x0 #define DCCG_GATE_DISABLE_CNTL3__HDMISTREAMCLK1_GATE_DISABLE__SHIFT 0x1 @@ -46978,6 +46990,13 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_CLOCK_EN_MASK 0x00000001L #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DISPCLK_R_GATE_DIS_MASK 0x00000010L #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L + + +//DSC_TOP0_DSC_DEBUG_CONTROL +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h index 14c29ce4c7b3..78cb61d5800a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h @@ -1719,6 +1719,10 @@ #define regDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2 #define regFMON_CTRL 0x0541 #define regFMON_CTRL_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_INDEX 0x0542 +#define regDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regDCHUBBUB_TEST_DEBUG_DATA 0x0543 +#define regDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dchubbubl_hubbub_sdpif_dispdec @@ -3574,6 +3578,10 @@ #define regCM0_CM_DEALPHA_BASE_IDX 2 #define regCM0_CM_COEF_FORMAT 0x0d8c #define regCM0_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_INDEX 0x0d8d +#define regCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM0_CM_TEST_DEBUG_DATA 0x0d8e +#define regCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp0_dispdec_dpp_top_dispdec @@ -3960,6 +3968,10 @@ #define regCM1_CM_DEALPHA_BASE_IDX 2 #define regCM1_CM_COEF_FORMAT 0x0ef7 #define regCM1_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_INDEX 0x0ef8 +#define regCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM1_CM_TEST_DEBUG_DATA 0x0ef9 +#define regCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp1_dispdec_dpp_top_dispdec @@ -4346,6 +4358,10 @@ #define regCM2_CM_DEALPHA_BASE_IDX 2 #define regCM2_CM_COEF_FORMAT 0x1062 #define regCM2_CM_COEF_FORMAT_BASE_IDX 2 +#define 
regCM2_CM_TEST_DEBUG_INDEX 0x1063 +#define regCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM2_CM_TEST_DEBUG_DATA 0x1064 +#define regCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp2_dispdec_dpp_top_dispdec @@ -4732,6 +4748,10 @@ #define regCM3_CM_DEALPHA_BASE_IDX 2 #define regCM3_CM_COEF_FORMAT 0x11cd #define regCM3_CM_COEF_FORMAT_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_INDEX 0x11ce +#define regCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define regCM3_CM_TEST_DEBUG_DATA 0x11cf +#define regCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dcn_dc_dpp3_dispdec_dpp_top_dispdec @@ -11780,6 +11800,16 @@ #define regDSCC0_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3035 #define regDSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE 0x303a +#define regDSCC0_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA0 0x303b +#define regDSCC0_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA1 0x303c +#define regDSCC0_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA2 0x303d +#define regDSCC0_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC0_DSCC_TEST_DEBUG_DATA3 0x303e +#define regDSCC0_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec @@ -11888,6 +11918,16 @@ #define regDSCC1_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3091 #define regDSCC1_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE 0x3096 +#define regDSCC1_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA0 0x3097 +#define regDSCC1_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA1 0x3098 +#define regDSCC1_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA2 0x3099 +#define regDSCC1_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC1_DSCC_TEST_DEBUG_DATA3 0x309a +#define regDSCC1_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc1_dispdec_dsccif_dispdec @@ -11996,6 +12036,16 @@ #define regDSCC2_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x30ed #define regDSCC2_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE 0x30f2 +#define regDSCC2_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0 0x30f3 +#define regDSCC2_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA1 0x30f4 +#define regDSCC2_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA2 0x30f5 +#define regDSCC2_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC2_DSCC_TEST_DEBUG_DATA3 0x30f6 +#define regDSCC2_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc2_dispdec_dsccif_dispdec @@ -12104,6 +12154,16 @@ #define regDSCC3_DSCC_RATE_CONTROL_BUFFER2_MAX_FULLNESS_LEVEL_BASE_IDX 2 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL 0x3149 #define regDSCC3_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE 0x314e +#define regDSCC3_DSCC_TEST_DEBUG_BUS_ROTATE_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA0 0x314f +#define regDSCC3_DSCC_TEST_DEBUG_DATA0_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA1 0x3150 +#define regDSCC3_DSCC_TEST_DEBUG_DATA1_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA2 
0x3151 +#define regDSCC3_DSCC_TEST_DEBUG_DATA2_BASE_IDX 2 +#define regDSCC3_DSCC_TEST_DEBUG_DATA3 0x3152 +#define regDSCC3_DSCC_TEST_DEBUG_DATA3_BASE_IDX 2 // addressBlock: dcn_dc_dsc3_dispdec_dsccif_dispdec diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h index d70518307a34..1093105ca35b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h @@ -42272,6 +42272,18 @@ //DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__SHIFT 0x0 #define DSCC0_DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL__DSCC_RATE_CONTROL_BUFFER3_MAX_FULLNESS_LEVEL_MASK 0x0003FFFFL +//DSCC0_DSCC_TEST_DEBUG_INDEX2 +#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_INDEX2__DSCC_TEST_DEBUG_INDEX2_MASK 0x000000FFL +//DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE__SHIFT 0x0 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE__SHIFT 0x8 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE__SHIFT 0x10 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE__SHIFT 0x18 +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS0_ROTATE_MASK 0x0000001FL +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS1_ROTATE_MASK 0x00001F00L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS2_ROTATE_MASK 0x001F0000L +#define DSCC0_DSCC_TEST_DEBUG_BUS_ROTATE__DSCC_TEST_DEBUG_BUS3_ROTATE_MASK 0x1F000000L // addressBlock: dcn_dc_dsc0_dispdec_dsccif_dispdec @@ -42305,6 +42317,16 @@ #define DSC_TOP0_DSC_TOP_CONTROL__DSC_DSCCLK_R_GATE_DIS_MASK 0x00000100L //DSC_TOP0_DSC_DEBUG_CONTROL #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL_MASK 0x00000070L + + +//DSC_TOP0_DSC_DEBUG_CONTROL +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_TEST_CLOCK_MUX_SEL__SHIFT 0x4 +//DSC_TOP0_DSC_DEBUG_CONTROL +#define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN__SHIFT 0x0 #define DSC_TOP0_DSC_DEBUG_CONTROL__DSC_DBG_EN_MASK 0x00000001L diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h index 55743d06f728..e55ff0e8d74c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h @@ -70,7 +70,9 @@ //DPCSTX0_DPCSTX_PLL_UPDATE_DATA #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL - +//DPCSTX0_DPCSTX_DEBUG_CONFIG +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe +#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L // addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec //RDPCSTX0_RDPCSTX_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h index 01a56556cde1..5b4fdeda1040 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h +++ 
b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h @@ -155,6 +155,8 @@ #define regRDPCSTX0_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c #define regRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d +#define regRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940 #define regRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941 @@ -239,6 +241,8 @@ #define regRDPCSTX1_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14 #define regRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15 +#define regRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18 #define regRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19 @@ -323,6 +327,8 @@ #define regRDPCSTX2_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec #define regRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed +#define regRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0 #define regRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1 @@ -407,6 +413,8 @@ #define regRDPCSTX3_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4 #define regRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5 +#define regRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8 #define regRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9 @@ -491,6 +499,8 @@ #define regRDPCSTX4_RDPCSTX_CNTL2_BASE_IDX 2 #define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c #define regRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 +#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d +#define regRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 #define regRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0 #define regRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2 #define regRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1 -- cgit v1.2.3-70-g09d2 From 56198d6d5e3fb21d291a06f97f27cab750c3f81d Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 14:46:19 -0600 Subject: drm/amd/display: Remove redundant RESERVE0 and RESERVE1 This commit drops the RESERVE0 and RESERVE1 since both of them can be summarized as RESERVED. 
Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index b7ad64e7b62a..b9f8eee5d2d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -1222,8 +1222,7 @@ union replay_enable_and_configuration { unsigned char FREESYNC_PANEL_REPLAY_MODE :1; unsigned char TIMING_DESYNC_ERROR_VERIFICATION :1; unsigned char STATE_TRANSITION_ERROR_DETECTION :1; - unsigned char RESERVED0 :1; - unsigned char RESERVED1 :4; + unsigned char RESERVED :5; } bits; unsigned char raw; }; -- cgit v1.2.3-70-g09d2 From ab0d29d9328930e443d7be3a5ee3a40270f64ea5 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 16:22:02 -0600 Subject: drm/amd/display: Add missing SFB and OPP_SF Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 1 + drivers/gpu/drm/amd/display/dc/dce/dce_opp.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h index f98400efdd9b..e34e445a4013 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h @@ -181,6 +181,7 @@ struct dce_mem_input_registers { SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\ SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\ SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\ + SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\ SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\ SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\ SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h index bf1ffc3629c7..3d9be87aae45 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.h @@ -111,6 +111,7 @@ enum dce110_opp_reg_type { OPP_SF(FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh),\ OPP_SF(FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh),\ OPP_SF(FMT_DITHER_RAND_B_SEED, FMT_RAND_B_SEED, mask_sh),\ + OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_EN, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_RESET, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_OFFSET, mask_sh),\ OPP_SF(FMT_BIT_DEPTH_CONTROL, FMT_TEMPORAL_DITHER_DEPTH, mask_sh),\ -- cgit v1.2.3-70-g09d2 From 314a58b8aeeb2f8f72233178ed2e29f81b355355 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 16:38:35 -0600 Subject: drm/amd/display: Initialize debug variable data Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index d9ade9ee0aeb..6dd355a03033 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -130,7 +130,7 @@ bool hubbub1_verify_allow_pstate_change_high( static unsigned int max_sampled_pstate_wait_us; /* data collection */ static bool forced_pstate_allow; /* help with 
revert wa */ - unsigned int debug_data; + unsigned int debug_data = 0; unsigned int i; if (forced_pstate_allow) { -- cgit v1.2.3-70-g09d2 From e9a48b6b957c81aeeda6d95604f1c53fa61ab1fd Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Wed, 28 Nov 2018 11:17:53 -0500 Subject: drm/amd/display: Fix MPCC DTN logging [Why] DTN only logs 'pipe_count' instances of MPCC. However in some cases there are different number of MPCC than DPP (pipe_count). [How] Add mpcc_count parameter to resource_pool and set it during pool construction and use it for DTN logging of MPCC state. Signed-off-by: Eric Bernstein Reviewed-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c index 9033b39e0e0c..c51b717e5622 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c @@ -392,7 +392,7 @@ static unsigned int dcn10_get_mpcc_states(struct dc *dc, char *pBuf, unsigned in remaining_buffer -= chars_printed; pBuf += chars_printed; - for (i = 0; i < pool->pipe_count; i++) { + for (i = 0; i < pool->mpcc_count; i++) { struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); -- cgit v1.2.3-70-g09d2 From 71fb6ed2e7bc1833e4d0faeb05c61c6df6f44c87 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 16:49:55 -0600 Subject: drm/amd/display: Add WBSCL ram coefficient for writeback Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c index f8667be57046..80779e85e2c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c @@ -299,6 +299,17 @@ void dwb2_set_scaler(struct dwbc *dwbc, struct dc_dwb_params *params) } } + + if (dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL) { + /* Swap double buffered coefficient set */ + uint32_t wbscl_mode = REG_READ(WBSCL_MODE); + bool coef_ram_current = get_reg_field_value_ex( + wbscl_mode, dwbc20->dwbc_mask->WBSCL_COEF_RAM_SEL_CURRENT, + dwbc20->dwbc_shift->WBSCL_COEF_RAM_SEL_CURRENT); + + REG_UPDATE(WBSCL_MODE, WBSCL_COEF_RAM_SEL, !coef_ram_current); + } + } static const struct dwbc_funcs dcn20_dwbc_funcs = { -- cgit v1.2.3-70-g09d2 From f3736c0d979a6f9fe2017b864f106dd286a5c18c Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 16:53:14 -0600 Subject: drm/amd/display: Add code comments clock and encode code This commit adds some comments to make easier to understand the clock update for DCN 201, the encode function, and other minor comments. 
Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c | 11 +++++++++++ drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c | 2 +- .../drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 4 ++++ drivers/gpu/drm/amd/display/dc/resource/Makefile | 6 ------ 4 files changed, 16 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c index 9c90090e7351..f77840dd051e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c @@ -100,7 +100,15 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (clk_mgr_base->clks.dispclk_khz == 0 || dc->debug.force_clock_mode & 0x1) { + /* this is from resume or boot up, if forced_clock cfg option + * used, we bypass program dispclk and DPPCLK, but need set them + * for S3. + */ + force_reset = true; + /* force_clock_mode 0x1: force reset the clock even it is the + * same clock as long as it is in Passive level. + */ dcn2_read_clocks_from_hw_dentist(clk_mgr_base); } @@ -150,11 +158,14 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); dcn20_update_clocks_update_dentist(clk_mgr, context); } else { + // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) dcn20_update_clocks_update_dentist(clk_mgr, context); + // always update dtos unless clock is lowered and not safe to lower dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c index 09ea65acb2c4..63798132ed95 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c @@ -103,5 +103,5 @@ void hubbub201_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; - hubbub->detile_buf_size = 164 * 1024; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 45143459eedd..678db949cfe3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -474,6 +474,10 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets( &info_frame->hdrsmd, true); + /* packetIndex 4 is used for send immediate sdp message, and please + * use other packetIndex (such as 5,6) for other info packet + */ + if (info_frame->adaptive_sync.valid) enc->vpg->funcs->update_generic_info_packet( enc->vpg, diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile index 184b1f23aa77..db9048974d74 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/Makefile +++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile @@ -102,10 +102,6 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21) ############################################################################### 
-############################################################################### - -############################################################################### - RESOURCE_DCN30 = dcn30_resource.o AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30)) @@ -202,6 +198,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN351) ############################################################################### -############################################################################### - endif -- cgit v1.2.3-70-g09d2 From 7d63d82d730c8f5ddca7ae9135675162bfe1cdc5 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 17:02:54 -0600 Subject: drm/amd/display: Includes adjustments This commit clean up some of the includes used by DCN. Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 4 ---- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c | 2 -- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c | 2 -- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h | 1 - drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h | 2 +- 5 files changed, 1 insertion(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c index f9d6a181164a..b851fc65f5b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c @@ -34,11 +34,7 @@ #include "reg_helper.h" #include "fixed31_32.h" -#ifdef _WIN32 -#include "atombios.h" -#else #include "atom.h" -#endif #define TO_DMUB_ABM(abm)\ container_of(abm, struct dce_abm, base) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index 1fb8fd7afc95..b8e31b5ea114 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -30,8 +30,6 @@ #include "dcn30_dio_link_encoder.h" #include "stream_encoder.h" #include "dc_bios_types.h" -/* #include "dcn3ag/dcn3ag_phy_fw.h" */ - #include "gpio_service_interface.h" #define CTX \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 005dbe099a7a..8ed7125d230d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -30,8 +30,6 @@ #include "hw_shared.h" #include "dc.h" #include "core_types.h" -#include - #define DC_LOGGER \ enc1->base.ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h index 499052329ebb..1212fcee38f2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_stream_encoder.h @@ -28,7 +28,6 @@ #include "dcn30/dcn30_vpg.h" #include "dcn30/dcn30_afmt.h" #include "stream_encoder.h" -#include "dcn10/dcn10_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" /* Register bit field name change */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 9e4ddc985240..55529c5f471c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -31,7 +31,7 @@ #define 
__DCN_CALCS_H__ #include "bw_fixed.h" -#include "../dml/display_mode_lib.h" +#include "dml/display_mode_lib.h" struct dc; -- cgit v1.2.3-70-g09d2 From df398f02327352a70e3cf4ac99370662880317e4 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Fri, 22 Mar 2024 17:44:26 -0600 Subject: drm/amd/display: Add color logs for dcn20 Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c index 884e3e323338..ef6488165b8f 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c @@ -67,6 +67,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .setup_stereo = dcn10_setup_stereo, .set_avmute = dce110_set_avmute, .log_hw_state = dcn10_log_hw_state, + .log_color_state = dcn20_log_color_state, .get_hw_state = dcn10_get_hw_state, .clear_status_bits = dcn10_clear_status_bits, .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect, -- cgit v1.2.3-70-g09d2 From 52821adbc30668e9ebf3b236b2c9f9ac9a135446 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 25 Mar 2024 13:52:04 -0600 Subject: drm/amd/display: Enable FGCG for DCN351 Signed-off-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 66ab6aba26aa..8a57adb27264 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1893,6 +1893,8 @@ static bool dcn351_resource_construct( if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; + /*HW default is to have all the FGCG enabled, SW no need to program them*/ + dc->debug.enable_fine_grain_clock_gating.u32All = 0xFFFF; // Init the vm_helper if (dc->vm_helper) vm_helper_init(dc->vm_helper, 16); -- cgit v1.2.3-70-g09d2 From 7c1d9e10e6643121f1ffe9c0903467cc8682eba8 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Thu, 28 Mar 2024 11:00:50 +0800 Subject: drm/amd/pm: fix the high voltage issue after unload fix the high voltage issue after unload on smu 13.0.10 Signed-off-by: Kenneth Feng Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 26 ++++++++++++--------- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 27 ++++++++++++++++++++-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 + .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 8 ++++++- 4 files changed, 48 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 66a5979e075d..ed34c4893108 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4145,18 +4145,22 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ip_blocks[i].status.hw = true; } } + } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(adev)) { + r = psp_gpu_reset(adev); } else { - tmp = amdgpu_reset_method; - /* It 
should do a default reset when loading or reloading the driver, - * regardless of the module parameter reset_method. - */ - amdgpu_reset_method = AMD_RESET_METHOD_NONE; - r = amdgpu_asic_reset(adev); - amdgpu_reset_method = tmp; - if (r) { - dev_err(adev->dev, "asic reset on init failed\n"); - goto failed; - } + tmp = amdgpu_reset_method; + /* It should do a default reset when loading or reloading the driver, + * regardless of the module parameter reset_method. + */ + amdgpu_reset_method = AMD_RESET_METHOD_NONE; + r = amdgpu_asic_reset(adev); + amdgpu_reset_method = tmp; + } + + if (r) { + dev_err(adev->dev, "asic reset on init failed\n"); + goto failed; } } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 246b211b1e85..65333141b1c1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -735,7 +735,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - smu->smu_baco.state = SMU_BACO_STATE_EXIT; + smu->smu_baco.state = SMU_BACO_STATE_NONE; smu->smu_baco.platform_support = false; smu->user_dpm_profile.fan_mode = -1; @@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) return 0; } +static int smu_reset_mp1_state(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if ((!adev->in_runpm) && (!adev->in_suspend) && + (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(adev)) + ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); + + return ret; +} + static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = adev->powerplay.pp_handle; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; @@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle) adev->pm.dpm_enabled = false; - return smu_smc_hw_cleanup(smu); + ret = smu_smc_hw_cleanup(smu); + if (ret) + return ret; + + ret = smu_reset_mp1_state(smu); + if (ret) + return ret; + + return 0; } static void smu_late_fini(void *handle) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index a870bdd49a4e..1fa81575788c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -424,6 +424,7 @@ enum smu_reset_mode { enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, SMU_BACO_STATE_EXIT, + SMU_BACO_STATE_NONE, }; struct smu_baco_context { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9c03296f92cd..67117ced7c6a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, switch (mp1_state) { case PP_MP1_STATE_UNLOAD: - ret = smu_cmn_set_mp1_state(smu, mp1_state); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_PrepareMp1ForUnload, + 0x55, NULL); + + if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) + ret = smu_v13_0_disable_pmfw_state(smu); + break; default: /* Ignore others */ -- cgit v1.2.3-70-g09d2 From e5040d1496be041a586bb74357703677b0cac232 Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Mon, 25 Mar 2024 09:37:09 +0800 Subject: drm/amdgpu: Add a new runtime mode definition Add a new runtime pm mode 
AMDGPU_RUNPM_BAMACO and related macro definition Signed-off-by: Ma Jun Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 621200e0823f..e6cad9f7aaeb 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -50,8 +50,12 @@ enum amdgpu_runpm_mode { AMDGPU_RUNPM_PX, AMDGPU_RUNPM_BOCO, AMDGPU_RUNPM_BACO, + AMDGPU_RUNPM_BAMACO, }; +#define BACO_SUPPORT (1<<0) +#define MACO_SUPPORT (1<<1) + struct amdgpu_ps { u32 caps; /* vbios flags */ u32 class; /* vbios flags */ -- cgit v1.2.3-70-g09d2 From 1b19959427988f02236d5fe5ddb776705da22c87 Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Mon, 25 Mar 2024 10:17:52 +0800 Subject: drm/amdgpu/pm: Change the member function name in pp_hwmgr_func and pptable_funcs Use a unified and more explicit name get_bamaco_support to replace is_baco_support and get_asic_baco_capability Signed-off-by: Ma Jun Suggested-by: Lijo Lazar Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 4 ++-- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 4 ++-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 6 ++++-- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 4 ++-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 4 ++-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 2 +- 27 files changed, 34 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index aed0e2cefbf9..581dadfd13f9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1379,10 +1379,10 @@ static bool pp_get_asic_baco_capability(void *handle) return false; if (!(hwmgr->not_vf && amdgpu_dpm) || - !hwmgr->hwmgr_func->get_asic_baco_capability) + !hwmgr->hwmgr_func->get_bamaco_support) return false; - return hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr); + return hwmgr->hwmgr_func->get_bamaco_support(hwmgr); } static int pp_get_asic_baco_state(void *handle, int *state) diff --git 
a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c index e8a9471c1898..8490614e2d7e 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c @@ -33,7 +33,7 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" -bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr) +bool smu7_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h index 73a773f4ce2e..93df1b9dfbd9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu7_baco_get_capability(struct pp_hwmgr *hwmgr); +extern bool smu7_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index aa91730e4eaf..1fcd4451001f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -5791,7 +5791,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, .get_performance_level = smu7_get_performance_level, - .get_asic_baco_capability = smu7_baco_get_capability, + .get_bamaco_support = smu7_get_bamaco_support, .get_asic_baco_state = smu7_baco_get_state, .set_asic_baco_state = smu7_baco_set_state, .power_off_asic = smu7_power_off_asic, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c index c66ef9741535..938812d6b978 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c @@ -28,7 +28,7 @@ #include "vega10_inc.h" #include "smu9_baco.h" -bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr) +bool smu9_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg, data; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h index 9ff7c2ea1b58..67a5cec8e6b5 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu9_baco_get_capability(struct pp_hwmgr *hwmgr); +extern bool smu9_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index 6d6bc6a380b3..9f5bd998c6bf 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -5756,7 +5756,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .set_power_limit = vega10_set_power_limit, .odn_edit_dpm_table = vega10_odn_edit_dpm_table, .get_performance_level = vega10_get_performance_level, - .get_asic_baco_capability = smu9_baco_get_capability, + 
.get_bamaco_support = smu9_get_bamaco_support, .get_asic_baco_state = smu9_baco_get_state, .set_asic_baco_state = vega10_baco_set_state, .enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index 460067933de2..c223e3a6bfca 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2966,7 +2966,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = { .start_thermal_controller = vega12_start_thermal_controller, .powergate_gfx = vega12_gfx_off_control, .get_performance_level = vega12_get_performance_level, - .get_asic_baco_capability = smu9_baco_get_capability, + .get_bamaco_support = smu9_get_bamaco_support, .get_asic_baco_state = smu9_baco_get_state, .set_asic_baco_state = vega12_baco_set_state, .get_ppfeature_status = vega12_get_ppfeature_status, diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index dad4c80aee58..0d0523e046d4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -36,7 +36,7 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, }; -bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr) +bool vega20_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h index bdad9c915631..a2c0f4e7813c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool vega20_baco_get_capability(struct pp_hwmgr *hwmgr); +extern bool vega20_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 3b33af30eb0f..f9efb0bad807 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -4422,7 +4422,7 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .notify_cac_buffer_info = vega20_notify_cac_buffer_info, .enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost, /* BACO related */ - .get_asic_baco_capability = vega20_baco_get_capability, + .get_bamaco_support = vega20_get_bamaco_support, .get_asic_baco_state = vega20_baco_get_state, .set_asic_baco_state = vega20_baco_set_state, .set_mp1_state = vega20_set_mp1_state, diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 6f536159df4d..5b3da41f159f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -351,7 +351,7 @@ struct pp_hwmgr_func { int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - bool 
(*get_asic_baco_capability)(struct pp_hwmgr *hwmgr); + bool (*get_bamaco_support)(struct pp_hwmgr *hwmgr); int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 65333141b1c1..ed4285665ca3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -3230,10 +3230,10 @@ static bool smu_get_baco_capability(void *handle) if (!smu->pm_enabled) return false; - if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support) + if (!smu->ppt_funcs || !smu->ppt_funcs->get_bamaco_support) return false; - return smu->ppt_funcs->baco_is_support(smu); + return smu->ppt_funcs->get_bamaco_support(smu); } static int smu_baco_set_state(void *handle, int state) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 1fa81575788c..3ef76f9543f3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1174,9 +1174,11 @@ struct pptable_funcs { int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); /** - * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off). + * @get_bamaco_support: Check if GPU supports BACO/MACO + * BACO: Bus Active, Chip Off + * MACO: Memory Active, Chip Off */ - bool (*baco_is_support)(struct smu_context *smu); + bool (*get_bamaco_support)(struct smu_context *smu); /** * @baco_get_state: Get the current BACO state. diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index a0e5ad0381d6..0e8fdf3bccc4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -237,7 +237,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v11_0_baco_is_support(struct smu_context *smu); +bool smu_v11_0_get_bamaco_support(struct smu_context *smu); enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index fbd57fa1a004..bad7e73e9fe0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -210,7 +210,7 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v13_0_baco_is_support(struct smu_context *smu); +bool smu_v13_0_get_bamaco_support(struct smu_context *smu); int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 4af1985ae446..31bbbc1ddda2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -160,7 +160,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu); int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_baco_seq baco_seq); -bool smu_v14_0_baco_is_support(struct smu_context *smu); +bool smu_v14_0_get_bamaco_support(struct smu_context *smu); enum 
smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 0c2d04f978ac..6d334a2aff67 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2387,7 +2387,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = smu_v11_0_baco_enter, .baco_exit = smu_v11_0_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 836b1df79928..5a68d365967f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -3538,7 +3538,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = navi10_baco_enter, .baco_exit = navi10_baco_exit, .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 1f18b61884f3..e426f457a017 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -4431,7 +4431,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, + .get_bamaco_support = smu_v11_0_get_bamaco_support, .baco_enter = sienna_cichlid_baco_enter, .baco_exit = sienna_cichlid_baco_exit, .mode1_reset_is_support = sienna_cichlid_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 5e5da9b16718..f6913820dbb7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1557,7 +1557,7 @@ int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } -bool smu_v11_0_baco_is_support(struct smu_context *smu) +bool smu_v11_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index f41ac6465f2a..639edcb90deb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1581,7 +1581,7 @@ out: adev->unique_id = ((uint64_t)upper32 << 32) | lower32; } -static bool aldebaran_is_baco_supported(struct smu_context *smu) +static bool aldebaran_get_bamaco_support(struct smu_context *smu) { /* aldebaran is not support baco */ @@ -2059,7 +2059,7 @@ static const struct 
pptable_funcs aldebaran_ppt_funcs = { .register_irq_handler = smu_v13_0_register_irq_handler, .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = aldebaran_is_baco_supported, + .get_bamaco_support = aldebaran_get_bamaco_support, .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index ce16f2a08a47..0193b81eab14 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2268,7 +2268,7 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -bool smu_v13_0_baco_is_support(struct smu_context *smu) +bool smu_v13_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 67117ced7c6a..1e09d5f2d82f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -3076,7 +3076,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .deep_sleep_control = smu_v13_0_deep_sleep_control, .gfx_ulv_control = smu_v13_0_gfx_ulv_control, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, .baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 1258060816dd..7c3a85879fd5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2077,7 +2077,7 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu) adev->unique_id = pptable->PublicSerialNumber_AID; } -static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu) +static bool smu_v13_0_6_get_bamaco_support(struct smu_context *smu) { /* smu_13_0_6 does not support baco */ @@ -3188,7 +3188,7 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { .enable_thermal_alert = smu_v13_0_enable_thermal_alert, .disable_thermal_alert = smu_v13_0_disable_thermal_alert, .setup_pptable = smu_v13_0_6_setup_pptable, - .baco_is_support = smu_v13_0_6_is_baco_supported, + .get_bamaco_support = smu_v13_0_6_get_bamaco_support, .get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq, .set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 7318964f1f14..e996a0a4d33e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2650,7 +2650,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .set_tool_table_location = smu_v13_0_set_tool_table_location, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, - .baco_is_support = smu_v13_0_baco_is_support, + .get_bamaco_support = smu_v13_0_get_bamaco_support, 
.baco_enter = smu_v13_0_baco_enter, .baco_exit = smu_v13_0_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 07a65e005785..5bf6bde99161 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1590,7 +1590,7 @@ int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, return 0; } -bool smu_v14_0_baco_is_support(struct smu_context *smu) +bool smu_v14_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; -- cgit v1.2.3-70-g09d2 From b2207dc6989f5c913cca932eb8720b6ad0725d67 Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Wed, 27 Mar 2024 17:26:08 +0800 Subject: drm/amdgpu/pm: Add support for MACO flag checking Add support for MACO flag checking. MACO mode only works if BACO is supported. Signed-off-by: Ma Jun Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8 +++++--- drivers/gpu/drm/amd/amdgpu/cik.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/si.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/soc15.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/vi.c | 8 ++++---- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2 +- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 8 ++++---- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 2 +- drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c | 6 +++--- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c | 8 ++++---- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h | 2 +- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c | 8 ++++---- drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h | 2 +- drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 3 ++- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 14 +++++++++----- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 4 ++-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 14 +++++++++----- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 4 ++-- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 14 +++++++++----- 27 files changed, 79 insertions(+), 64 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index b3b84647207e..65c17c59c152 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -606,7 +606,7 @@ struct amdgpu_asic_funcs { /* PCIe replay counter */ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); /* device supports BACO */ - bool (*supports_baco)(struct amdgpu_device *adev); + int (*supports_baco)(struct amdgpu_device *adev); /* pre asic_init quirks */ void (*pre_asic_init)(struct amdgpu_device *adev); /* enter/exit umd stable pstate */ @@ -1408,7 +1408,7 @@ bool amdgpu_device_supports_atpx(struct drm_device *dev); bool amdgpu_device_supports_px(struct drm_device *dev); bool amdgpu_device_supports_boco(struct drm_device *dev); bool amdgpu_device_supports_smart_shift(struct drm_device *dev); -bool amdgpu_device_supports_baco(struct drm_device *dev); +int amdgpu_device_supports_baco(struct drm_device 
*dev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_device_baco_enter(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ed34c4893108..f830024cffca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -338,10 +338,12 @@ bool amdgpu_device_supports_boco(struct drm_device *dev) * * @dev: drm_device pointer * - * Returns true if the device supporte BACO, - * otherwise return false. + * Return: + * 1 if the device supporte BACO; + * 3 if the device support MACO (only works if BACO is supported) + * otherwise return 0. */ -bool amdgpu_device_supports_baco(struct drm_device *dev) +int amdgpu_device_supports_baco(struct drm_device *dev) { struct amdgpu_device *adev = drm_to_adev(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index a3a643254d7a..fdbc26346b54 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1375,14 +1375,14 @@ static int cik_asic_pci_config_reset(struct amdgpu_device *adev) return r; } -static bool cik_asic_supports_baco(struct amdgpu_device *adev) +static int cik_asic_supports_baco(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_BONAIRE: case CHIP_HAWAII: return amdgpu_dpm_is_baco_supported(adev); default: - return false; + return 0; } } diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 23e4ef4fff7c..67e179c7e347 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1409,9 +1409,9 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev) return r; } -static bool si_asic_supports_baco(struct amdgpu_device *adev) +static int si_asic_supports_baco(struct amdgpu_device *adev) { - return false; + return 0; } static enum amd_reset_method diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index dec81ccf6240..c8abbf5da736 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -502,7 +502,7 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev) static enum amd_reset_method soc15_asic_reset_method(struct amdgpu_device *adev) { - bool baco_reset = false; + int baco_reset = 0; bool connected_to_cpu = false; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -540,7 +540,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev) */ if (ras && adev->ras_enabled && adev->pm.fw_version <= 0x283400) - baco_reset = false; + baco_reset = 0; } else { baco_reset = amdgpu_dpm_is_baco_supported(adev); } @@ -620,7 +620,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev) } } -static bool soc15_supports_baco(struct amdgpu_device *adev) +static int soc15_supports_baco(struct amdgpu_device *adev) { switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(9, 0, 0): @@ -628,13 +628,13 @@ static bool soc15_supports_baco(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20) { if (adev->psp.sos.fw_version >= 0x80067) return amdgpu_dpm_is_baco_supported(adev); - return false; + return 0; } else { return amdgpu_dpm_is_baco_supported(adev); } break; default: - return false; + return 0; } } diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 1a98812981f4..2415355b037c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -897,7 +897,7 @@ static int 
vi_asic_pci_config_reset(struct amdgpu_device *adev) return r; } -static bool vi_asic_supports_baco(struct amdgpu_device *adev) +static int vi_asic_supports_baco(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_FIJI: @@ -908,14 +908,14 @@ static bool vi_asic_supports_baco(struct amdgpu_device *adev) case CHIP_TOPAZ: return amdgpu_dpm_is_baco_supported(adev); default: - return false; + return 0; } } static enum amd_reset_method vi_asic_reset_method(struct amdgpu_device *adev) { - bool baco_reset; + int baco_reset; if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY || amdgpu_reset_method == AMD_RESET_METHOD_BACO) @@ -935,7 +935,7 @@ vi_asic_reset_method(struct amdgpu_device *adev) baco_reset = amdgpu_dpm_is_baco_supported(adev); break; default: - baco_reset = false; + baco_reset = 0; break; } diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index afb930b70615..805c9d37a2b4 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -421,7 +421,7 @@ struct amd_pm_funcs { int (*set_hard_min_dcefclk_by_freq)(void *handle, uint32_t clock); int (*set_hard_min_fclk_by_freq)(void *handle, uint32_t clock); int (*set_min_deep_sleep_dcefclk)(void *handle, uint32_t clock); - bool (*get_asic_baco_capability)(void *handle); + int (*get_asic_baco_capability)(void *handle); int (*get_asic_baco_state)(void *handle, int *state); int (*set_asic_baco_state)(void *handle, int state); int (*get_ppfeature_status)(void *handle, char *buf); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index f84bfed50681..eee919577b44 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -199,14 +199,14 @@ int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en) return ret; } -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) +int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; void *pp_handle = adev->powerplay.pp_handle; - bool ret; + int ret; if (!pp_funcs || !pp_funcs->get_asic_baco_capability) - return false; + return 0; /* Don't use baco for reset in S3. * This is a workaround for some platforms * where entering BACO during suspend @@ -217,7 +217,7 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) * devices. Needs more investigation. 
*/ if (adev->in_s3) - return false; + return 0; mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index e6cad9f7aaeb..501f8c726e8d 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -411,7 +411,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev); int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev); int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev); -bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); +int amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev); bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev); int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 581dadfd13f9..133d1ee6e67c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1371,7 +1371,7 @@ static int pp_set_active_display_count(void *handle, uint32_t count) return phm_set_active_display_count(hwmgr, count); } -static bool pp_get_asic_baco_capability(void *handle) +static int pp_get_asic_baco_capability(void *handle) { struct pp_hwmgr *hwmgr = handle; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c index 8490614e2d7e..ad60918aaae1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c @@ -33,7 +33,7 @@ #include "smu/smu_7_1_2_d.h" #include "smu/smu_7_1_2_sh_mask.h" -bool smu7_get_bamaco_support(struct pp_hwmgr *hwmgr) +int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; @@ -44,9 +44,9 @@ bool smu7_get_bamaco_support(struct pp_hwmgr *hwmgr) reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; - return false; + return 0; } int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h index 93df1b9dfbd9..750082ea74d8 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu7_get_bamaco_support(struct pp_hwmgr *hwmgr); +extern int smu7_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c index 938812d6b978..c1ce1d7cae48 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c @@ -28,13 +28,13 @@ #include "vega10_inc.h" #include "smu9_baco.h" -bool smu9_get_bamaco_support(struct pp_hwmgr *hwmgr) +int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg, data; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return false; + return 0; WREG32(0x12074, 0xFFF0003B); data = RREG32(0x12075); @@ -43,10 +43,10 @@ bool smu9_get_bamaco_support(struct pp_hwmgr *hwmgr) reg = 
RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; } - return false; + return 0; } int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h index 67a5cec8e6b5..2c100482084c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool smu9_get_bamaco_support(struct pp_hwmgr *hwmgr); +extern int smu9_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c index 0d0523e046d4..424e4ec9e389 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c @@ -36,22 +36,22 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { {CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0}, }; -bool vega20_get_bamaco_support(struct pp_hwmgr *hwmgr) +int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); uint32_t reg; if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) - return false; + return 0; if (((RREG32(0x17569) & 0x20000000) >> 29) == 0x1) { reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0); if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; + return BACO_SUPPORT; } - return false; + return 0; } int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h index a2c0f4e7813c..0f2dd8c008ba 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h @@ -25,7 +25,7 @@ #include "hwmgr.h" #include "common_baco.h" -extern bool vega20_get_bamaco_support(struct pp_hwmgr *hwmgr); +extern int vega20_get_bamaco_support(struct pp_hwmgr *hwmgr); extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 5b3da41f159f..69928a4a074b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -351,7 +351,7 @@ struct pp_hwmgr_func { int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - bool (*get_bamaco_support)(struct pp_hwmgr *hwmgr); + int (*get_bamaco_support)(struct pp_hwmgr *hwmgr); int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index ed4285665ca3..dc2a864b0f51 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -737,6 +737,7 @@ static int smu_early_init(void *handle) smu->is_apu = false; smu->smu_baco.state = SMU_BACO_STATE_NONE; smu->smu_baco.platform_support = false; + smu->smu_baco.maco_support = false; smu->user_dpm_profile.fan_mode = -1; mutex_init(&smu->message_lock); @@ -3223,7 +3224,7 @@ static int smu_set_xgmi_pstate(void *handle, return ret; } -static bool smu_get_baco_capability(void *handle) +static int smu_get_baco_capability(void *handle) { struct smu_context *smu = handle; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 3ef76f9543f3..5dd092703b8d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1178,7 +1178,7 @@ struct pptable_funcs { * BACO: Bus Active, Chip Off * MACO: Memory Active, Chip Off */ - bool (*get_bamaco_support)(struct smu_context *smu); + int (*get_bamaco_support)(struct smu_context *smu); /** * @baco_get_state: Get the current BACO state. diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index 0e8fdf3bccc4..c2ab336bb530 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -237,7 +237,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v11_0_get_bamaco_support(struct smu_context *smu); +int smu_v11_0_get_bamaco_support(struct smu_context *smu); enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index bad7e73e9fe0..d9700a3f28d2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -210,7 +210,7 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); -bool smu_v13_0_get_bamaco_support(struct smu_context *smu); +int smu_v13_0_get_bamaco_support(struct smu_context *smu); int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 31bbbc1ddda2..6cdfee5052d9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -160,7 +160,7 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu); int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_baco_seq baco_seq); -bool smu_v14_0_get_bamaco_support(struct smu_context *smu); +int smu_v14_0_get_bamaco_support(struct smu_context *smu); enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index f6913820dbb7..8407ca4fdc17 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1557,23 +1557,27 @@ int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } -bool smu_v11_0_get_bamaco_support(struct smu_context *smu) +int smu_v11_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + 
int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return bamaco_support |= BACO_SUPPORT; /* Arcturus does not support this bit mask */ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 639edcb90deb..acf41db3ef72 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1581,11 +1581,11 @@ out: adev->unique_id = ((uint64_t)upper32 << 32) | lower32; } -static bool aldebaran_get_bamaco_support(struct smu_context *smu) +static int aldebaran_get_bamaco_support(struct smu_context *smu) { /* aldebaran is not support baco */ - return false; + return 0; } static int aldebaran_set_df_cstate(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 0193b81eab14..7277cc72d7f5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2268,22 +2268,26 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, return ret; } -bool smu_v13_0_get_bamaco_support(struct smu_context *smu) +int smu_v13_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return bamaco_support |= BACO_SUPPORT; if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } int smu_v13_0_baco_enter(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 7c3a85879fd5..624432b3b483 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2077,11 +2077,11 @@ static void smu_v13_0_6_get_unique_id(struct smu_context *smu) adev->unique_id = pptable->PublicSerialNumber_AID; } -static bool smu_v13_0_6_get_bamaco_support(struct smu_context *smu) +static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu) { /* smu_13_0_6 does not support baco */ - return false; + return 0; } static const char *const throttling_logging_label[] = { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 5bf6bde99161..707b37f535e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1590,23 +1590,27 @@ int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu, return 0; } -bool smu_v14_0_get_bamaco_support(struct smu_context *smu) +int 
smu_v14_0_get_bamaco_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; + int bamaco_support = 0; if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support) - return false; + return 0; + + if (smu_baco->maco_support) + bamaco_support |= MACO_SUPPORT; /* return true if ASIC is in BACO state already */ if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER) - return true; + return (bamaco_support |= BACO_SUPPORT); if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) && !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) - return false; + return 0; - return true; + return (bamaco_support |= BACO_SUPPORT); } enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu) -- cgit v1.2.3-70-g09d2 From 327eec542746b254d9585410bba7f8fdcf61404e Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 31 Mar 2024 12:59:17 +0800 Subject: drm/amdgpu: Bypass asd if display hw is not available ASD is not needed by headless GPU. Signed-off-by: Hawking Zhang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 94b310fdb719..83bf86352267 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1053,6 +1053,11 @@ static int psp_asd_initialize(struct psp_context *psp) if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes) return 0; + /* bypass asd if display hardware is not available */ + if (!amdgpu_device_has_display_hardware(psp->adev) && + amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10)) + return 0; + psp->asd_context.mem_context.shared_mc_addr = 0; psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE; psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD; -- cgit v1.2.3-70-g09d2 From 166a3c735c959d4c60427b47bb9c7d9187891a78 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Mon, 1 Apr 2024 08:30:58 +0800 Subject: drm/amd/pm: centralize all pp_dpm_xxx attribute nodes update cb centralize all pp_dpm_xxx attr nodes into pp_dpm_clk_default_attr_update() function. 
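For reference, a minimal sketch of how such a per-attribute callback is consumed (simplified and assumed from the existing attribute-creation helper, not code from this patch): when an amdgpu_device_attr carries an .attr_update hook, that hook runs in place of the generic default_attr_update() while the sysfs node is created, roughly:

    /* sketch only: names follow the hunks below, control flow is assumed */
    attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
    ret = attr_update(adev, attr, mask, &states);
    if (!ret && states == ATTR_STATE_UNSUPPORTED)
        return 0;    /* the sysfs node is simply not created */

So moving the per-clock checks into pp_dpm_clk_default_attr_update() keeps the gating behaviour of each pp_dpm_* node while shrinking default_attr_update().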
Signed-off-by: Yang Wang Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 207 ++++++++++++++++++++----------------- 1 file changed, 115 insertions(+), 92 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index b410df28ccb2..5bc1cd4993e8 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -38,6 +38,8 @@ #define MAX_NUM_OF_FEATURES_PER_SUBSET 8 #define MAX_NUM_OF_SUBSETS 8 +#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name) + struct od_attribute { struct kobj_attribute attribute; struct list_head entry; @@ -2115,6 +2117,99 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_ return 0; } +static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, + uint32_t mask, enum amdgpu_device_attr_states *states) +{ + struct device_attribute *dev_attr = &attr->dev_attr; + enum amdgpu_device_attr_id attr_id = attr->attr_id; + uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); + + *states = ATTR_STATE_SUPPORTED; + + if (!(attr->flags & mask)) { + *states = ATTR_STATE_UNSUPPORTED; + return 0; + } + + if (DEVICE_ATTR_IS(pp_dpm_socclk)) { + if (gc_ver < IP_VERSION(9, 0, 0)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { + if (mp1_ver < IP_VERSION(10, 0, 0)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { + if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(10, 1, 2) || + gc_ver == IP_VERSION(11, 0, 0) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3) || + gc_ver == IP_VERSION(9, 4, 3))) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { + if (!((gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { + if (!(gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 3) || + gc_ver == IP_VERSION(10, 3, 6) || + gc_ver == IP_VERSION(10, 3, 7) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(10, 1, 2) || + gc_ver == IP_VERSION(11, 0, 0) || + gc_ver == IP_VERSION(11, 0, 1) || + gc_ver == IP_VERSION(11, 0, 4) || + gc_ver == IP_VERSION(11, 5, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3) || + gc_ver == IP_VERSION(9, 4, 3))) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { + if (!((gc_ver == IP_VERSION(10, 3, 1) || + gc_ver == IP_VERSION(10, 3, 0) || + gc_ver == IP_VERSION(11, 0, 2) || + gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) + *states = ATTR_STATE_UNSUPPORTED; + } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { + if (gc_ver == IP_VERSION(9, 4, 2) || + gc_ver == IP_VERSION(9, 4, 3)) + *states = ATTR_STATE_UNSUPPORTED; + } + + switch (gc_ver) { + case IP_VERSION(9, 4, 1): + case IP_VERSION(9, 4, 2): + /* the Mi series card does not support standalone mclk/socclk/fclk level setting */ + if (DEVICE_ATTR_IS(pp_dpm_mclk) || + 
DEVICE_ATTR_IS(pp_dpm_socclk) || + DEVICE_ATTR_IS(pp_dpm_fclk)) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + break; + default: + break; + } + + /* setting should not be allowed from VF if not in one VF mode */ + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) { + dev_attr->attr.mode &= ~S_IWUGO; + dev_attr->store = NULL; + } + + return 0; +} + /* Following items will be read out to indicate current plpd policy: * - -1: none * - 0: disallow @@ -2186,17 +2281,26 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, .attr_update = pp_dpm_dcefclk_attr_update), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF, + .attr_update = pp_dpm_clk_default_attr_update), AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), @@ -2204,7 +2308,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { .attr_update = pp_od_clk_voltage_attr_update), AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), @@ -2227,7 +2331,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ { struct device_attribute *dev_attr = 
&attr->dev_attr; enum amdgpu_device_attr_id attr_id = attr->attr_id; - uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0); uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); if (!(attr->flags & mask)) { @@ -2235,15 +2338,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ return 0; } -#define DEVICE_ATTR_IS(_name) (attr_id == device_attr_id__##_name) - - if (DEVICE_ATTR_IS(pp_dpm_socclk)) { - if (gc_ver < IP_VERSION(9, 0, 0)) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) { - if (mp1_ver < IP_VERSION(10, 0, 0)) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(mem_busy_percent)) { + if (DEVICE_ATTR_IS(mem_busy_percent)) { if ((adev->flags & AMD_IS_APU && gc_ver != IP_VERSION(9, 4, 3)) || gc_ver == IP_VERSION(9, 0, 1)) @@ -2287,48 +2382,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ } else if (DEVICE_ATTR_IS(gpu_metrics)) { if (gc_ver < IP_VERSION(9, 1, 0)) *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) { - if (!(gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 3) || - gc_ver == IP_VERSION(10, 3, 6) || - gc_ver == IP_VERSION(10, 3, 7) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(10, 1, 2) || - gc_ver == IP_VERSION(11, 0, 0) || - gc_ver == IP_VERSION(11, 0, 1) || - gc_ver == IP_VERSION(11, 0, 4) || - gc_ver == IP_VERSION(11, 5, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3) || - gc_ver == IP_VERSION(9, 4, 3))) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) { - if (!((gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) { - if (!(gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 3) || - gc_ver == IP_VERSION(10, 3, 6) || - gc_ver == IP_VERSION(10, 3, 7) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(10, 1, 2) || - gc_ver == IP_VERSION(11, 0, 0) || - gc_ver == IP_VERSION(11, 0, 1) || - gc_ver == IP_VERSION(11, 0, 4) || - gc_ver == IP_VERSION(11, 5, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3) || - gc_ver == IP_VERSION(9, 4, 3))) - *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) { - if (!((gc_ver == IP_VERSION(10, 3, 1) || - gc_ver == IP_VERSION(10, 3, 0) || - gc_ver == IP_VERSION(11, 0, 2) || - gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2)) - *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; @@ -2350,23 +2403,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; - } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) { - if (gc_ver == IP_VERSION(9, 4, 2) || - gc_ver == IP_VERSION(9, 4, 3)) - *states = ATTR_STATE_UNSUPPORTED; } switch (gc_ver) { - case IP_VERSION(9, 4, 1): - case IP_VERSION(9, 4, 2): - /* the Mi series card does not support standalone mclk/socclk/fclk level setting */ - if (DEVICE_ATTR_IS(pp_dpm_mclk) || - DEVICE_ATTR_IS(pp_dpm_socclk) || - DEVICE_ATTR_IS(pp_dpm_fclk)) { - dev_attr->attr.mode &= ~S_IWUGO; - dev_attr->store = NULL; - } - break; case IP_VERSION(10, 3, 0): if 
(DEVICE_ATTR_IS(power_dpm_force_performance_level) && amdgpu_sriov_vf(adev)) { @@ -2378,22 +2417,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ break; } - /* setting should not be allowed from VF if not in one VF mode */ - if (amdgpu_sriov_vf(adev) && (!amdgpu_sriov_is_pp_one_vf(adev) || - DEVICE_ATTR_IS(pp_dpm_sclk) || - DEVICE_ATTR_IS(pp_dpm_mclk) || - DEVICE_ATTR_IS(pp_dpm_socclk) || - DEVICE_ATTR_IS(pp_dpm_fclk) || - DEVICE_ATTR_IS(pp_dpm_vclk) || - DEVICE_ATTR_IS(pp_dpm_vclk1) || - DEVICE_ATTR_IS(pp_dpm_dclk) || - DEVICE_ATTR_IS(pp_dpm_dclk1))) { - dev_attr->attr.mode &= ~S_IWUGO; - dev_attr->store = NULL; - } - -#undef DEVICE_ATTR_IS - return 0; } -- cgit v1.2.3-70-g09d2 From fcc0735b00872d9ff0d209551e66c08a79cd594a Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Mon, 25 Mar 2024 11:56:41 +0800 Subject: drm/amdgpu: Add support for BAMACO mode checking Optimize the code to add support for BAMACO mode checking Signed-off-by: Ma Jun Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 6 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 23 +++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 ++++-- 3 files changed, 27 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e4277298cf1a..6ea893ad9a36 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2744,7 +2744,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) { /* nothing to do */ - } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { amdgpu_device_baco_enter(drm_dev); } @@ -2784,7 +2785,8 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) * PCI core handles it for _PR3. 
*/ pci_set_master(pdev); - } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { amdgpu_device_baco_exit(drm_dev); } ret = amdgpu_device_resume(drm_dev, false); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a2df3025a754..55eb4e4eb8f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -133,6 +133,7 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { struct drm_device *dev; + int bamaco_support = 0; int r, acpi_status; dev = adev_to_drm(adev); @@ -158,8 +159,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */ adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; dev_info(adev->dev, "Using BOCO for runtime pm\n"); - } else if (amdgpu_device_supports_baco(dev) && - (amdgpu_runtime_pm != 0)) { + } else if (amdgpu_runtime_pm != 0) { + bamaco_support = amdgpu_device_supports_baco(dev); + + if (!bamaco_support) + goto no_runtime_pm; + switch (adev->asic_type) { case CHIP_VEGA20: case CHIP_ARCTURUS: @@ -178,10 +183,20 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) break; } - if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) - dev_info(adev->dev, "Using BACO for runtime pm\n"); + if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + if (bamaco_support & MACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; + dev_info(adev->dev, "Using BAMACO for runtime pm\n"); + } else { + dev_info(adev->dev, "Using BACO for runtime pm\n"); + } + } } +no_runtime_pm: + if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) + dev_info(adev->dev, "NO pm mode for runtime pm\n"); + /* Call ACPI methods: require modeset init * but failure is not fatal */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 83bf86352267..edae581059af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2622,7 +2622,8 @@ static int psp_load_p2s_table(struct psp_context *psp) struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE]; - if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) + if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) return 0; if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6)) { @@ -2652,7 +2653,8 @@ static int psp_load_smu_fw(struct psp_context *psp) * Skip SMU FW reloading in case of using BACO for runpm only, * as SMU is always alive. */ - if (adev->in_runpm && (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO)) + if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO))) return 0; if (!ucode->fw || amdgpu_sriov_vf(psp->adev)) -- cgit v1.2.3-70-g09d2 From 5279a8506fdc42ecfb50a91bce71f5b64f25c3d0 Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Mon, 25 Mar 2024 13:44:37 +0800 Subject: drm/amdgpu/pm: Check AMDGPU_RUNPM_BAMACO when setting baco state Check AMDGPU_RUNPM_BAMACO intead of amdgpu_runtime_pm when setting baco state. 
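Put differently (illustrative sketch only, 'seq' is a made-up local): the BACO entry sequence is now chosen from the runtime-pm mode resolved once at driver load, instead of being re-derived from the amdgpu_runtime_pm module parameter and the raw maco_support flag at entry time:

    /* selection as done in the hunks below */
    seq = (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ?
          BACO_SEQ_BAMACO : BACO_SEQ_BACO;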
Signed-off-by: Ma Jun Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 5 ++--- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 8407ca4fdc17..9d5ab2ea643a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1607,7 +1607,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) case IP_VERSION(11, 0, 11): case IP_VERSION(11, 0, 12): case IP_VERSION(11, 0, 13): - if (amdgpu_runtime_pm == 2) + if (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, D3HOT_BAMACO_SEQUENCE, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 7277cc72d7f5..a8d34adc7d3f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -2247,7 +2247,7 @@ static int smu_v13_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { @@ -2292,13 +2292,12 @@ int smu_v13_0_get_bamaco_support(struct smu_context *smu) int smu_v13_0_baco_enter(struct smu_context *smu) { - struct smu_baco_context *smu_baco = &smu->smu_baco; struct amdgpu_device *adev = smu->adev; int ret; if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { return smu_v13_0_baco_set_armd3_sequence(smu, - (smu_baco->maco_support && amdgpu_runtime_pm != 1) ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); } else { ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 707b37f535e2..2c3517397b14 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1633,7 +1633,7 @@ int smu_v14_0_baco_set_state(struct smu_context *smu, if (state == SMU_BACO_STATE_ENTER) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, - smu_baco->maco_support ? + (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO) ? BACO_SEQ_BAMACO : BACO_SEQ_BACO, NULL); } else { -- cgit v1.2.3-70-g09d2 From 91bc86011661c184e01bf3f693432ba4d0347217 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 6 Mar 2024 17:05:07 +0530 Subject: drm/amdgpu: Fix VCN allocation in CPX partition VCN need not be shared in CPX mode always for all GFX 9.4.3 SOC SKUs. In certain configs, VCN instance can be exclusively allocated to a partition even under CPX mode. 
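For example (instance counts assumed for illustration, not taken from this patch), the new aqua_vanjaram_xcp_vcn_shared() helper makes the sharing decision purely from the counts:

    /*
     * num_xcps = 8, num_vcn_inst = 4  ->  VCN shared: the ring is also added
     *                                     to the next partition's sched list
     * num_xcps = 4, num_vcn_inst = 4  ->  VCN exclusive to its own partition
     */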
Signed-off-by: Lijo Lazar Reviewed-by: James Zhu Reviewed-by: Asad Kamal Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index d6f808acfb17..fbb43ae7624f 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } +static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev) +{ + return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst); +} + static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, uint32_t inst_idx, struct amdgpu_ring *ring) { @@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, case AMDGPU_RING_TYPE_VCN_ENC: case AMDGPU_RING_TYPE_VCN_JPEG: ip_blk = AMDGPU_XCP_VCN; - if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + if (aqua_vanjaram_xcp_vcn_shared(adev)) inst_mask = 1 << (inst_idx * 2); break; default: @@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update( aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); - /* VCN is shared by two partitions under CPX MODE */ + /* VCN may be shared by two partitions under CPX MODE in certain + * configs. + */ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || - ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && - adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + aqua_vanjaram_xcp_vcn_shared(adev)) aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); } -- cgit v1.2.3-70-g09d2 From fa643cdd37673c43190f90dce57aedd0ca5edb3f Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Fri, 29 Mar 2024 18:23:40 +0800 Subject: drm/amd/pm: update XGMI RAS UE criteria for sum v13.0.6 Add more possible ext error code. v2: still use ext error code instead of UC bit. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 624432b3b483..7998c188db5b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -2693,7 +2693,8 @@ static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, st ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]); err_cnt = MCA_REG__MISC0__ERRCNT(entry->regs[MCA_REG_IDX_MISC0]); - if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0) + if (type == AMDGPU_MCA_ERROR_TYPE_UE && + (ext_error_code == 0 || ext_error_code == 9)) *count = err_cnt; else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6) *count = err_cnt; -- cgit v1.2.3-70-g09d2 From 9ecef5b2d0a09eb54f8431208758bb2aa2a6b779 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 1 Apr 2024 15:46:16 +0800 Subject: drm/amdgpu: update check condition for XGMI ACA UE Check more possible ext error codes. 
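Both this change and the preceding smu_v13_0_6 change apply the same criterion; as a rough restatement ('is_ue' is illustrative, not a variable in the code):

    /* an XGMI bank only contributes to the UE count for these ext codes */
    is_ue = (ext_error_code == 0 || ext_error_code == 9);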
Signed-off-by: Tao Zhou Reviewed-by: Yang Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 523827e8e7a8..dd2ec48cf5c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1060,7 +1060,9 @@ static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_ban switch (type) { case ACA_SMU_TYPE_UE: - count = ext_error_code == 0 ? count : 0ULL; + if (ext_error_code != 0 && ext_error_code != 9) + count = 0ULL; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count); break; case ACA_SMU_TYPE_CE: -- cgit v1.2.3-70-g09d2 From f6ac0842364a5721c02e9dd1c956eb51c7431ff3 Mon Sep 17 00:00:00 2001 From: chongli2 Date: Tue, 26 Mar 2024 13:24:21 +0800 Subject: drm/amd/amdgpu: support MES command SET_HW_RESOURCE1 in sriov support MES command SET_HW_RESOURCE1 in sriov Signed-off-by: chongli2 Reviewed-by: Jingwen Chen Acked-by: Jingwen Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 6 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 5 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 4 +++ drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 9 ++++-- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 43 +++++++++++++++++++++++++++ drivers/gpu/drm/amd/include/mes_v11_api_def.h | 21 +++++++++++++ 6 files changed, 85 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 4c8fc3117ef8..6b3e1844eac5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -141,6 +141,12 @@ struct amdgpu_mes { /* ip specific functions */ const struct amdgpu_mes_funcs *funcs; + + /* mes resource_1 bo*/ + struct amdgpu_bo *resource_1; + uint64_t resource_1_gpu_addr; + void *resource_1_addr; + }; struct amdgpu_mes_process { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 6f01de220c44..461753904dea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -576,6 +576,11 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) vf2pf_info->decode_usage = 0; vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr; + vf2pf_info->mes_info_addr = (uint64_t)adev->mes.resource_1_gpu_addr; + + if (adev->mes.resource_1) { + vf2pf_info->mes_info_size = adev->mes.resource_1->tbo.base.size; + } vf2pf_info->checksum = amd_sriov_msg_checksum( vf2pf_info, vf2pf_info->header.size, 0, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index a858bc98cad4..a9f2f0c4f799 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -132,6 +132,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6), /* VCN RB decouple */ AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7), + /* MES info */ + AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -335,6 +337,8 @@ static inline bool is_virtual_machine(void) ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT) #define amdgpu_sriov_is_vcn_rb_decouple(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE) +#define amdgpu_sriov_is_mes_info_enable(adev) \ + ((adev)->virt.gim_feature & 
AMDGIM_FEATURE_MES_INFO_ENABLE) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 51a14f6d93bd..0de78d6a83fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -94,7 +94,8 @@ union amd_sriov_msg_feature_flags { uint32_t reg_indirect_acc : 1; uint32_t av1_support : 1; uint32_t vcn_rb_decouple : 1; - uint32_t reserved : 24; + uint32_t mes_info_enable : 1; + uint32_t reserved : 23; } flags; uint32_t all; }; @@ -221,7 +222,7 @@ struct amd_sriov_msg_vf2pf_info_header { uint32_t reserved[2]; }; -#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (70) +#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (73) struct amd_sriov_msg_vf2pf_info { /* header contains size and version */ struct amd_sriov_msg_vf2pf_info_header header; @@ -265,7 +266,9 @@ struct amd_sriov_msg_vf2pf_info { uint32_t version; } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE]; uint64_t dummy_page_addr; - + /* FB allocated for guest MES to record UQ info */ + uint64_t mes_info_addr; + uint32_t mes_info_size; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE]; }; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 63f281a9984d..e5230078a4cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -422,6 +422,36 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) offsetof(union MESAPI_SET_HW_RESOURCES, api_status)); } +static int mes_v11_0_set_hw_resources_1(struct amdgpu_mes *mes) +{ + int size = 128 * PAGE_SIZE; + int ret = 0; + struct amdgpu_device *adev = mes->adev; + union MESAPI_SET_HW_RESOURCES_1 mes_set_hw_res_pkt; + memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt)); + + mes_set_hw_res_pkt.header.type = MES_API_TYPE_SCHEDULER; + mes_set_hw_res_pkt.header.opcode = MES_SCH_API_SET_HW_RSRC_1; + mes_set_hw_res_pkt.header.dwsize = API_FRAME_SIZE_IN_DWORDS; + mes_set_hw_res_pkt.enable_mes_info_ctx = 1; + + ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &mes->resource_1, + &mes->resource_1_gpu_addr, + &mes->resource_1_addr); + if (ret) { + dev_err(adev->dev, "(%d) failed to create mes resource_1 bo\n", ret); + return ret; + } + + mes_set_hw_res_pkt.mes_info_ctx_mc_addr = mes->resource_1_gpu_addr; + mes_set_hw_res_pkt.mes_info_ctx_size = mes->resource_1->tbo.base.size; + return mes_v11_0_submit_pkt_and_poll_completion(mes, + &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), + offsetof(union MESAPI_SET_HW_RESOURCES_1, api_status)); +} + static const struct amdgpu_mes_funcs mes_v11_0_funcs = { .add_hw_queue = mes_v11_0_add_hw_queue, .remove_hw_queue = mes_v11_0_remove_hw_queue, @@ -1203,6 +1233,14 @@ static int mes_v11_0_hw_init(void *handle) if (r) goto failure; + if (amdgpu_sriov_is_mes_info_enable(adev)) { + r = mes_v11_0_set_hw_resources_1(&adev->mes); + if (r) { + DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r); + goto failure; + } + } + r = mes_v11_0_query_sched_status(&adev->mes); if (r) { DRM_ERROR("MES is busy\n"); @@ -1226,6 +1264,11 @@ failure: static int mes_v11_0_hw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (amdgpu_sriov_is_mes_info_enable(adev)) { + amdgpu_bo_free_kernel(&adev->mes.resource_1, 
&adev->mes.resource_1_gpu_addr, + &adev->mes.resource_1_addr); + } return 0; } diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index ec5b9ab67c5e..410c8d664336 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -61,6 +61,7 @@ enum MES_SCH_API_OPCODE { MES_SCH_API_MISC = 14, MES_SCH_API_UPDATE_ROOT_PAGE_TABLE = 15, MES_SCH_API_AMD_LOG = 16, + MES_SCH_API_SET_HW_RSRC_1 = 19, MES_SCH_API_MAX = 0xFF }; @@ -238,6 +239,26 @@ union MESAPI_SET_HW_RESOURCES { uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; }; +union MESAPI_SET_HW_RESOURCES_1 { + struct { + union MES_API_HEADER header; + struct MES_API_STATUS api_status; + uint64_t timestamp; + union { + struct { + uint32_t enable_mes_info_ctx : 1; + uint32_t reserved : 31; + }; + uint32_t uint32_all; + }; + uint64_t mes_info_ctx_mc_addr; + uint32_t mes_info_ctx_size; + uint32_t mes_kiq_unmap_timeout; // unit is 100ms + }; + + uint32_t max_dwords_in_api[API_FRAME_SIZE_IN_DWORDS]; +}; + union MESAPI__ADD_QUEUE { struct { union MES_API_HEADER header; -- cgit v1.2.3-70-g09d2 From 4b0cb230bdb71c23981acfa5e7b367c7dde02a41 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Tue, 2 Apr 2024 14:58:33 +0800 Subject: drm/amdgpu: retire UMC v12 mca_addr_to_pa RAS TA will handle it, the function is useless. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 - drivers/gpu/drm/amd/amdgpu/umc_v12_0.c | 105 ++------------------------------- drivers/gpu/drm/amd/amdgpu/umc_v12_0.h | 62 +------------------ 3 files changed, 7 insertions(+), 161 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 470a146f2f43..c4ec1358f3aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1457,7 +1457,6 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.channel_offs = UMC_V12_0_PER_CHANNEL_OFFSET; adev->umc.active_mask = adev->aid_mask; adev->umc.retire_unit = UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL; - adev->umc.channel_idx_tbl = &umc_v12_0_channel_idx_tbl[0][0][0]; if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) adev->umc.ras = &umc_v12_0_ras; break; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c index f46a176f9b55..a0122b22eda4 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.c @@ -28,28 +28,6 @@ #include "umc/umc_12_0_0_sh_mask.h" #include "mp/mp_13_0_6_sh_mask.h" -const uint32_t - umc_v12_0_channel_idx_tbl[] - [UMC_V12_0_UMC_INSTANCE_NUM] - [UMC_V12_0_CHANNEL_INSTANCE_NUM] = { - {{3, 7, 11, 15, 2, 6, 10, 14}, {1, 5, 9, 13, 0, 4, 8, 12}, - {19, 23, 27, 31, 18, 22, 26, 30}, {17, 21, 25, 29, 16, 20, 24, 28}}, - {{47, 43, 39, 35, 46, 42, 38, 34}, {45, 41, 37, 33, 44, 40, 36, 32}, - {63, 59, 55, 51, 62, 58, 54, 50}, {61, 57, 53, 49, 60, 56, 52, 48}}, - {{79, 75, 71, 67, 78, 74, 70, 66}, {77, 73, 69, 65, 76, 72, 68, 64}, - {95, 91, 87, 83, 94, 90, 86, 82}, {93, 89, 85, 81, 92, 88, 84, 80}}, - {{99, 103, 107, 111, 98, 102, 106, 110}, {97, 101, 105, 109, 96, 100, 104, 108}, - {115, 119, 123, 127, 114, 118, 122, 126}, {113, 117, 121, 125, 112, 116, 120, 124}} - }; - -/* mapping of MCA error address to normalized address */ -static const uint32_t umc_v12_0_ma2na_mapping[] = { - 0, 5, 6, 8, 9, 14, 12, 13, - 10, 11, 15, 16, 17, 18, 19, 
20, - 21, 22, 23, 24, 25, 26, 27, 28, - 24, 7, 29, 30, -}; - static inline uint64_t get_umc_v12_0_reg_offset(struct amdgpu_device *adev, uint32_t node_inst, uint32_t umc_inst, @@ -192,79 +170,6 @@ static void umc_v12_0_query_ras_error_count(struct amdgpu_device *adev, umc_v12_0_reset_error_count(adev); } -static bool umc_v12_0_bit_wise_xor(uint32_t val) -{ - bool result = 0; - int i; - - for (i = 0; i < 32; i++) - result = result ^ ((val >> i) & 0x1); - - return result; -} - -static void umc_v12_0_mca_addr_to_pa(struct amdgpu_device *adev, - uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst, - uint32_t node_inst, - struct ta_ras_query_address_output *addr_out) -{ - uint32_t channel_index, i; - uint64_t na, soc_pa; - uint32_t bank_hash0, bank_hash1, bank_hash2, bank_hash3, col, row; - uint32_t bank0, bank1, bank2, bank3, bank; - - bank_hash0 = (err_addr >> UMC_V12_0_MCA_B0_BIT) & 0x1ULL; - bank_hash1 = (err_addr >> UMC_V12_0_MCA_B1_BIT) & 0x1ULL; - bank_hash2 = (err_addr >> UMC_V12_0_MCA_B2_BIT) & 0x1ULL; - bank_hash3 = (err_addr >> UMC_V12_0_MCA_B3_BIT) & 0x1ULL; - col = (err_addr >> 1) & 0x1fULL; - row = (err_addr >> 10) & 0x3fffULL; - - /* apply bank hash algorithm */ - bank0 = - bank_hash0 ^ (UMC_V12_0_XOR_EN0 & - (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR0) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR0)))); - bank1 = - bank_hash1 ^ (UMC_V12_0_XOR_EN1 & - (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR1) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR1)))); - bank2 = - bank_hash2 ^ (UMC_V12_0_XOR_EN2 & - (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR2) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR2)))); - bank3 = - bank_hash3 ^ (UMC_V12_0_XOR_EN3 & - (umc_v12_0_bit_wise_xor(col & UMC_V12_0_COL_XOR3) ^ - (umc_v12_0_bit_wise_xor(row & UMC_V12_0_ROW_XOR3)))); - - bank = bank0 | (bank1 << 1) | (bank2 << 2) | (bank3 << 3); - err_addr &= ~0x3c0ULL; - err_addr |= (bank << UMC_V12_0_MCA_B0_BIT); - - na = 0x0; - /* convert mca error address to normalized address */ - for (i = 1; i < ARRAY_SIZE(umc_v12_0_ma2na_mapping); i++) - na |= ((err_addr >> i) & 0x1ULL) << umc_v12_0_ma2na_mapping[i]; - - channel_index = - adev->umc.channel_idx_tbl[node_inst * adev->umc.umc_inst_num * - adev->umc.channel_inst_num + - umc_inst * adev->umc.channel_inst_num + - ch_inst]; - /* translate umc channel address to soc pa, 3 parts are included */ - soc_pa = ADDR_OF_32KB_BLOCK(na) | - ADDR_OF_256B_BLOCK(channel_index) | - OFFSET_IN_256B_BLOCK(na); - - /* the umc channel bits are not original values, they are hashed */ - UMC_V12_0_SET_CHANNEL_HASH(channel_index, soc_pa); - - addr_out->pa.pa = soc_pa; - addr_out->pa.bank = bank; - addr_out->pa.channel_idx = channel_index; -} - static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, struct ta_ras_query_address_input *addr_in) @@ -275,10 +180,12 @@ static void umc_v12_0_convert_error_address(struct amdgpu_device *adev, err_addr = addr_in->ma.err_addr; addr_in->addr_type = TA_RAS_MCA_TO_PA; - if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) - /* fallback to old path if fail to get pa from psp */ - umc_v12_0_mca_addr_to_pa(adev, err_addr, addr_in->ma.ch_inst, - addr_in->ma.umc_inst, addr_in->ma.node_inst, &addr_out); + if (psp_ras_query_address(&adev->psp, addr_in, &addr_out)) { + dev_warn(adev->dev, "Failed to query RAS physical address for 0x%llx", + err_addr); + + return; + } soc_pa = addr_out.pa.pa; bank = addr_out.pa.bank; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h 
b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h index 5973bfb14fce..1d5f44dcffdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v12_0.h @@ -55,67 +55,12 @@ #define UMC_V12_0_NA_MAP_PA_NUM 8 /* R13 bit shift should be considered, double the number */ #define UMC_V12_0_BAD_PAGE_NUM_PER_CHANNEL (UMC_V12_0_NA_MAP_PA_NUM * 2) -/* bank bits in MCA error address */ -#define UMC_V12_0_MCA_B0_BIT 6 -#define UMC_V12_0_MCA_B1_BIT 7 -#define UMC_V12_0_MCA_B2_BIT 8 -#define UMC_V12_0_MCA_B3_BIT 9 + /* column bits in SOC physical address */ #define UMC_V12_0_PA_C2_BIT 15 #define UMC_V12_0_PA_C4_BIT 21 /* row bits in SOC physical address */ #define UMC_V12_0_PA_R13_BIT 35 -/* channel index bits in SOC physical address */ -#define UMC_V12_0_PA_CH4_BIT 12 -#define UMC_V12_0_PA_CH5_BIT 13 -#define UMC_V12_0_PA_CH6_BIT 14 - -/* bank hash settings */ -#define UMC_V12_0_XOR_EN0 1 -#define UMC_V12_0_XOR_EN1 1 -#define UMC_V12_0_XOR_EN2 1 -#define UMC_V12_0_XOR_EN3 1 -#define UMC_V12_0_COL_XOR0 0x0 -#define UMC_V12_0_COL_XOR1 0x0 -#define UMC_V12_0_COL_XOR2 0x800 -#define UMC_V12_0_COL_XOR3 0x1000 -#define UMC_V12_0_ROW_XOR0 0x11111 -#define UMC_V12_0_ROW_XOR1 0x22222 -#define UMC_V12_0_ROW_XOR2 0x4444 -#define UMC_V12_0_ROW_XOR3 0x8888 - -/* channel hash settings */ -#define UMC_V12_0_HASH_4K 0 -#define UMC_V12_0_HASH_64K 1 -#define UMC_V12_0_HASH_2M 1 -#define UMC_V12_0_HASH_1G 1 -#define UMC_V12_0_HASH_1T 1 - -/* XOR some bits of PA into CH4~CH6 bits (bits 12~14 of PA), - * hash bit is only effective when related setting is enabled - */ -#define UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) ((((channel_idx) >> 5) & 0x1) ^ \ - (((pa) >> 20) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ - (((pa) >> 27) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ - (((pa) >> 34) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ - (((pa) >> 41) & 0x1ULL & UMC_V12_0_HASH_1T)) -#define UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) ((((channel_idx) >> 6) & 0x1) ^ \ - (((pa) >> 21) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ - (((pa) >> 28) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ - (((pa) >> 35) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ - (((pa) >> 42) & 0x1ULL & UMC_V12_0_HASH_1T)) -#define UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) ((((channel_idx) >> 4) & 0x1) ^ \ - (((pa) >> 19) & 0x1ULL & UMC_V12_0_HASH_64K) ^ \ - (((pa) >> 26) & 0x1ULL & UMC_V12_0_HASH_2M) ^ \ - (((pa) >> 33) & 0x1ULL & UMC_V12_0_HASH_1G) ^ \ - (((pa) >> 40) & 0x1ULL & UMC_V12_0_HASH_1T) ^ \ - (((pa) >> 47) & 0x1ULL & UMC_V12_0_HASH_4K)) -#define UMC_V12_0_SET_CHANNEL_HASH(channel_idx, pa) do { \ - (pa) &= ~(0x7ULL << UMC_V12_0_PA_CH4_BIT); \ - (pa) |= (UMC_V12_0_CHANNEL_HASH_CH4(channel_idx, pa) << UMC_V12_0_PA_CH4_BIT); \ - (pa) |= (UMC_V12_0_CHANNEL_HASH_CH5(channel_idx, pa) << UMC_V12_0_PA_CH5_BIT); \ - (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \ - } while (0) #define MCA_IPID_LO_2_UMC_CH(_ipid_lo) (((((_ipid_lo) >> 20) & 0x1) * 4) + \ (((_ipid_lo) >> 12) & 0xF)) @@ -127,11 +72,6 @@ bool umc_v12_0_is_correctable_error(struct amdgpu_device *adev, uint64_t mc_umc_ typedef bool (*check_error_type_func)(struct amdgpu_device *adev, uint64_t mc_umc_status); -extern const uint32_t - umc_v12_0_channel_idx_tbl[] - [UMC_V12_0_UMC_INSTANCE_NUM] - [UMC_V12_0_CHANNEL_INSTANCE_NUM]; - extern struct amdgpu_umc_ras umc_v12_0_ras; #endif -- cgit v1.2.3-70-g09d2 From 05e40141685fd6aaedbde334b404c8dfbbd83fd6 Mon Sep 17 00:00:00 2001 From: ZhenGuo Yin Date: Tue, 2 Apr 2024 11:41:05 +0800 Subject: drm/amdgpu: clear set_q_mode_offs when VM changed MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] set_q_mode_offs don't get cleared after GPU reset, nexting SET_Q_MODE packet to init shadow memory will be skiped, hence there has a page fault. [How] VM flush is needed after GPU reset, clear set_q_mode_offs when emitting VM flush. Fixes: 8bc75586ea01 ("drm/amdgpu: workaround to avoid SET_Q_MODE packets v2") Reviewed-by: Christian König Signed-off-by: ZhenGuo Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 7a906318e451..c11c6299711e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* Make sure that we can't skip the SET_Q_MODE packets when the VM * changed in any way. */ + ring->set_q_mode_offs = 0; ring->set_q_mode_ptr = NULL; } -- cgit v1.2.3-70-g09d2 From 7d3ca076641e8cf04d613c0997eaed6dde5a38b1 Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Tue, 2 Apr 2024 18:11:27 +0800 Subject: drm/amd/pm: Report uclk/sclk current limits Use OD (pp_od_clk_voltage) interface to report current limits, default or those set by user, for SCLK and UCLK on smu_v_13_0_6 Signed-off-by: Asad Kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 7998c188db5b..56ed0cfb675e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -1010,8 +1010,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, switch (type) { case SMU_OD_SCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK"); - fallthrough; + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->gfxclk_pstate.curr.min, + pstate_table->gfxclk_pstate.curr.max); + break; case SMU_SCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &now); @@ -1052,8 +1055,11 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - size += sysfs_emit_at(buf, size, "%s:\n", "MCLK"); - fallthrough; + size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sysfs_emit_at(buf, size, "0: %uMhz\n1: %uMhz\n", + pstate_table->uclk_pstate.curr.min, + pstate_table->uclk_pstate.curr.max); + break; case SMU_MCLK: ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK, &now); -- cgit v1.2.3-70-g09d2 From 8de7948da75cc7af4f54ba025290e5067ceeeec8 Mon Sep 17 00:00:00 2001 From: Asad Kamal Date: Tue, 2 Apr 2024 18:16:39 +0800 Subject: drm/amd/pm: Update uclk/sclk limit report format Use OD (pp_od_clk_voltage) interface to report current limits, default or those set by user, for SCLK and UCLK on aldebaran. 
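With this format, reading pp_od_clk_voltage on an SMU v13.0.6 or Aldebaran part is expected to include OD_SCLK and OD_MCLK sections listing the current min/max limits; the layout follows the sysfs_emit_at() calls in the hunks above and below, while the clock values here are illustrative only:

	OD_SCLK:
	0: 500Mhz
	1: 1700Mhz
	OD_MCLK:
	0: 400Mhz
	1: 1600Mhz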
Signed-off-by: Asad Kamal Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index acf41db3ef72..ce941fbb9cfb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -759,8 +759,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, switch (type) { case SMU_OD_SCLK: - *offset += sysfs_emit_at(buf, *offset, "%s:\n", "GFXCLK"); - fallthrough; + *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_SCLK"); + *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n", + pstate_table->gfxclk_pstate.curr.min, + pstate_table->gfxclk_pstate.curr.max); + return 0; case SMU_SCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value); if (ret) { @@ -788,8 +791,11 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, break; case SMU_OD_MCLK: - *offset += sysfs_emit_at(buf, *offset, "%s:\n", "MCLK"); - fallthrough; + *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_MCLK"); + *offset += sysfs_emit_at(buf, *offset, "0: %uMhz\n1: %uMhz\n", + pstate_table->uclk_pstate.curr.min, + pstate_table->uclk_pstate.curr.max); + return 0; case SMU_MCLK: ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value); if (ret) { @@ -850,7 +856,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, } switch (type) { - case SMU_OD_SCLK: case SMU_SCLK: for (i = 0; i < display_levels; i++) { clock_mhz = freq_values[i]; @@ -863,7 +868,6 @@ static int aldebaran_emit_clk_levels(struct smu_context *smu, } break; - case SMU_OD_MCLK: case SMU_MCLK: case SMU_SOCCLK: case SMU_FCLK: -- cgit v1.2.3-70-g09d2 From 052965fba1979b89e2530c6e0c7ea24770285803 Mon Sep 17 00:00:00 2001 From: Srinivasan Shanmugam Date: Mon, 1 Apr 2024 19:18:27 +0530 Subject: drm/amd/display: Add missing parameter desc in dc_commit_streams This commit removes the lines that describe the 'streams' and 'stream_count' parameters and adds a line to describe the 'params' parameter, which was missing from the original comment block. 
Fixes the below with gcc W=1: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:2138: warning: Function parameter or member 'params' not described in 'dc_commit_streams' drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:2138: warning: Excess function parameter 'streams' description in 'dc_commit_streams' drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc.c:2138: warning: Excess function parameter 'stream_count' description in 'dc_commit_streams' Fixes: e779f4587f61 ("drm/amd/display: Add handling for DC power mode") Cc: Joshua Aberback Cc: Rodrigo Siqueira Cc: Roman Li Cc: Aurabindo Pillai Cc: Tom Chung Signed-off-by: Srinivasan Shanmugam Reviewed-by: Tom Chung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 5a93278fa246..0ffa79d83bc7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2125,8 +2125,7 @@ static bool commit_minimal_transition_state(struct dc *dc, * dc_commit_streams - Commit current stream state * * @dc: DC object with the commit state to be configured in the hardware - * @streams: Array with a list of stream state - * @stream_count: Total of streams + * @params: Parameters for the commit, including the streams to be committed * * Function responsible for commit streams change to the hardware. * -- cgit v1.2.3-70-g09d2 From b41f742d6fa6bc9799ff670a9b081f4ac6b911a0 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 25 Mar 2024 12:33:02 +0530 Subject: drm/amdgpu: Set fatal errror detected flag earlier In case of fatal errors, set FED status when interrupt is received. Set the flag on other devices in the hive before RAS recovery work. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 41 ++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index b8c7d0bf8fb1..352ce16a0963 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2399,6 +2399,19 @@ out: return ret; } +static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev, + struct amdgpu_hive_info *hive, bool status) +{ + struct amdgpu_device *tmp_adev; + + if (hive) { + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) + amdgpu_ras_set_fed(tmp_adev, status); + } else { + amdgpu_ras_set_fed(adev, status); + } +} + static void amdgpu_ras_do_recovery(struct work_struct *work) { struct amdgpu_ras *ras = @@ -2408,8 +2421,21 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) struct list_head device_list, *device_list_handle = NULL; struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); - if (hive) + if (hive) { atomic_set(&hive->ras_recovery, 1); + + /* If any device which is part of the hive received RAS fatal + * error interrupt, set fatal error status on all. This + * condition will need a recovery, and flag will be cleared + * as part of recovery. 
+ */ + list_for_each_entry(remote_adev, &hive->device_list, + gmc.xgmi.head) + if (amdgpu_ras_get_fed_status(remote_adev)) { + amdgpu_ras_set_fed_all(adev, hive, true); + break; + } + } if (!ras->disable_ras_err_cnt_harvest) { /* Build list of devices to query RAS related errors */ @@ -2454,18 +2480,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - /* For any RAS error that needs a full reset to - * recover, set the fatal error status - */ - if (hive) { - list_for_each_entry(remote_adev, - &hive->device_list, - gmc.xgmi.head) - amdgpu_ras_set_fed(remote_adev, - true); - } else { - amdgpu_ras_set_fed(adev, true); - } psp_fatal_error_recovery_quirk(&adev->psp); } } @@ -3550,6 +3564,7 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error" "(ERREVENT_ATHUB_INTERRUPT) detected!\n"); + amdgpu_ras_set_fed(adev, true); ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; amdgpu_ras_reset_gpu(adev); } -- cgit v1.2.3-70-g09d2 From 7b8081ea67533628310bb23b84f887fca4319a86 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 21 Mar 2024 18:52:39 +0530 Subject: drm/amd/pm: Add PMFW message and capability flags Add flags to categorize messages and PMFW capabilities. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 7 ++++--- drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 7 +++++++ drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 2 +- 3 files changed, 12 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 5dd092703b8d..0917dec8efe3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -459,7 +459,7 @@ struct smu_umd_pstate_table { struct cmn2asic_msg_mapping { int valid_mapping; int map_to; - int valid_in_vf; + uint32_t flags; }; struct cmn2asic_mapping { @@ -539,6 +539,7 @@ struct smu_context { uint32_t smc_driver_if_version; uint32_t smc_fw_if_version; uint32_t smc_fw_version; + uint32_t smc_fw_caps; bool uploading_custom_pp_table; bool dc_controlled_by_gpio; @@ -1485,8 +1486,8 @@ enum smu_baco_seq { BACO_SEQ_COUNT, }; -#define MSG_MAP(msg, index, valid_in_vf) \ - [SMU_MSG_##msg] = {1, (index), (valid_in_vf)} +#define MSG_MAP(msg, index, flags) \ + [SMU_MSG_##msg] = {1, (index), (flags)} #define CLK_MAP(clk, index) \ [SMU_##clk] = {1, (index)} diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index af427cc7dbb8..c48214e3dc8e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -445,4 +445,11 @@ enum smu_feature_mask { SMU_FEATURE_COUNT, }; +/* Message category flags */ +#define SMU_MSG_VF_FLAG (1U << 0) +#define SMU_MSG_RAS_PRI (1U << 1) + +/* Firmware capability flags */ +#define SMU_FW_CAP_RAS_PRI (1U << 0) + #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index b8dbd4e25348..3227e514e8ae 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -437,7 +437,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu, return -EINVAL; if (amdgpu_sriov_vf(smu->adev) && - !msg_mapping.valid_in_vf) + 
!(msg_mapping.flags & SMU_MSG_VF_FLAG)) return -EACCES; return msg_mapping.map_to; -- cgit v1.2.3-70-g09d2 From 301661b00a0582555d38c7ea0f94235729b00963 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 21 Mar 2024 19:19:48 +0530 Subject: drm/amd/pm: Add special handling for RAS messages When a RAS fatal error is detected, PMFW will only process priority messages. Other messages won't be taken up for processing and therefore won't get any response in such a state. Add logic to filter out non-priority messages when RAS error is detected. Also, don't poll response response status register before sending priority messages. Use firmware capability flag to determine whether to filter priority messages. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 65 ++++++++++++++++++++++++++++++---- 1 file changed, 59 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 3227e514e8ae..6d1c3af927ca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -235,6 +235,50 @@ static void __smu_cmn_send_msg(struct smu_context *smu, WREG32(smu->msg_reg, msg); } +static inline uint32_t __smu_cmn_get_msg_flags(struct smu_context *smu, + enum smu_message_type msg) +{ + return smu->message_map[msg].flags; +} + +static int __smu_cmn_ras_filter_msg(struct smu_context *smu, + enum smu_message_type msg, bool *poll) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t flags, resp; + bool fed_status; + + flags = __smu_cmn_get_msg_flags(smu, msg); + *poll = true; + + /* When there is RAS fatal error, FW won't process non-RAS priority + * messages. Don't allow any messages other than RAS priority messages. + */ + fed_status = amdgpu_ras_get_fed_status(adev); + if (fed_status) { + if (!(flags & SMU_MSG_RAS_PRI)) { + dev_dbg(adev->dev, + "RAS error detected, skip sending %s", + smu_get_message_name(smu, msg)); + return -EACCES; + } + + /* FW will ignore non-priority messages when a RAS fatal error + * is detected. Hence it is possible that a previous message + * wouldn't have got response. Allow to continue without polling + * for response status for priority messages. + */ + resp = RREG32(smu->resp_reg); + dev_dbg(adev->dev, + "Sending RAS priority message %s response status: %x", + smu_get_message_name(smu, msg), resp); + if (resp == 0) + *poll = false; + } + + return 0; +} + static int __smu_cmn_send_debug_msg(struct smu_context *smu, u32 msg, u32 param) @@ -354,6 +398,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, { struct amdgpu_device *adev = smu->adev; int res, index; + bool poll = true; u32 reg; if (adev->no_hw_access) @@ -366,12 +411,20 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, return index == -EACCES ? 
0 : index; mutex_lock(&smu->message_lock); - reg = __smu_cmn_poll_stat(smu); - res = __smu_cmn_reg2errno(smu, reg); - if (reg == SMU_RESP_NONE || - res == -EREMOTEIO) { - __smu_cmn_reg_print_error(smu, reg, index, param, msg); - goto Out; + + if (smu->smc_fw_caps & SMU_FW_CAP_RAS_PRI) { + res = __smu_cmn_ras_filter_msg(smu, msg, &poll); + if (res) + goto Out; + } + + if (poll) { + reg = __smu_cmn_poll_stat(smu); + res = __smu_cmn_reg2errno(smu, reg); + if (reg == SMU_RESP_NONE || res == -EREMOTEIO) { + __smu_cmn_reg_print_error(smu, reg, index, param, msg); + goto Out; + } } __smu_cmn_send_msg(smu, (uint16_t) index, param); reg = __smu_cmn_poll_stat(smu); -- cgit v1.2.3-70-g09d2 From 24c30a7b12b148cb9002d5b73c4c229138e39eb2 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 21 Mar 2024 19:40:41 +0530 Subject: drm/amd/pm: Categorize RAS messages on SMUv13.0.6 Set RAS priority handling capability for SMUv13.0.6 SOCs and categorize RAS priority messages allowed. Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 56ed0cfb675e..ade8ec7a44b9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -144,7 +144,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), - MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, 0), + MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), @@ -167,10 +167,10 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(GetCTFLimit, PPSMC_MSG_GetCTFLimit, 0), MSG_MAP(GetThermalLimit, PPSMC_MSG_ReadThrottlerLimit, 0), MSG_MAP(ClearMcaOnRead, PPSMC_MSG_ClearMcaOnRead, 0), - MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, 0), - MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, 0), - MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, 0), - MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, 0), + MSG_MAP(QueryValidMcaCount, PPSMC_MSG_QueryValidMcaCount, SMU_MSG_RAS_PRI), + MSG_MAP(QueryValidMcaCeCount, PPSMC_MSG_QueryValidMcaCeCount, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankDumpDW, PPSMC_MSG_McaBankDumpDW, SMU_MSG_RAS_PRI), + MSG_MAP(McaBankCeDumpDW, PPSMC_MSG_McaBankCeDumpDW, SMU_MSG_RAS_PRI), MSG_MAP(SelectPLPDMode, PPSMC_MSG_SelectPLPDMode, 0), MSG_MAP(RmaDueToBadPageThreshold, PPSMC_MSG_RmaDueToBadPageThreshold, 0), }; @@ -3224,6 +3224,7 @@ void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) smu->feature_map = smu_v13_0_6_feature_mask_map; smu->table_map = smu_v13_0_6_table_map; smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; + smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; smu_v13_0_set_smu_mailbox_registers(smu); amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); -- cgit v1.2.3-70-g09d2 From 
dfb15c4ab58658aaa6161b546e7eb852ae7cc132 Mon Sep 17 00:00:00 2001 From: Zhigang Luo Date: Mon, 18 Mar 2024 14:13:10 -0400 Subject: amd/amdkfd: sync all devices to wait all processes being evicted If there are more than one device doing reset in parallel, the first device will call kfd_suspend_all_processes() to evict all processes on all devices, this call takes time to finish. other device will start reset and recover without waiting. if the process has not been evicted before doing recover, it will be restored, then caused page fault. Signed-off-by: Zhigang Luo Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 041ec3de55e7..719d6d365e15 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { struct kfd_node *node; int i; - int count; if (!kfd->init_complete) return; @@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) /* for runtime suspend, skip locking kfd */ if (!run_pm) { mutex_lock(&kfd_processes_mutex); - count = ++kfd_locked; - mutex_unlock(&kfd_processes_mutex); - /* For first KFD device suspend all the KFD processes */ - if (count == 1) + if (++kfd_locked == 1) kfd_suspend_all_processes(); + mutex_unlock(&kfd_processes_mutex); } for (i = 0; i < kfd->num_nodes; i++) { @@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { - int ret, count, i; + int ret, i; if (!kfd->init_complete) return 0; @@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) /* for runtime resume, skip unlocking kfd */ if (!run_pm) { mutex_lock(&kfd_processes_mutex); - count = --kfd_locked; - mutex_unlock(&kfd_processes_mutex); - - WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); - if (count == 0) + if (--kfd_locked == 0) ret = kfd_resume_all_processes(); + WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); + mutex_unlock(&kfd_processes_mutex); } return ret; -- cgit v1.2.3-70-g09d2 From d1999b4017d485a3168b4ba1316937c82454165a Mon Sep 17 00:00:00 2001 From: Zhigang Luo Date: Wed, 20 Mar 2024 10:40:27 -0400 Subject: amd/amdgpu: improve VF recover time 1. change AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT from 30 to 5. 2. set fatel error detected flag. 
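Rough arithmetic on the retry-limit change, assuming the vf2pf update worker keeps its existing fixed period T (the period itself is not changed by this patch): the threshold for declaring the host unresponsive drops from 30 * T to 5 * T, roughly a 6x faster trigger of flr_work. The amdgpu_virt.c hunk below raises amdgpu_ras_set_fed(adev, true) before scheduling recovery, and the matching amdgpu_device.c hunk clears it with amdgpu_ras_set_fed(adev, false) once amdgpu_virt_reset_gpu() succeeds.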
Signed-off-by: Zhigang Luo Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f830024cffca..170da4551ed5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4994,6 +4994,7 @@ retry: r = amdgpu_virt_reset_gpu(adev); if (r) return r; + amdgpu_ras_set_fed(adev, false); amdgpu_irq_gpu_reset_resume_helper(adev); /* some sw clean up VF needs to do before recover */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 461753904dea..54ab51a4ada7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -598,6 +598,7 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) adev->virt.vf2pf_update_retry_cnt++; if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) { + amdgpu_ras_set_fed(adev, true); if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->virt.flr_work)) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index a9f2f0c4f799..642f1fd287d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -52,7 +52,7 @@ /* tonga/fiji use this offset */ #define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 -#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 30 +#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 5 enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, -- cgit v1.2.3-70-g09d2 From 0d38f6009e4e4e511fb5c3c673d54bf0c242c4b7 Mon Sep 17 00:00:00 2001 From: Lewis Huang Date: Thu, 21 Mar 2024 16:14:43 +0800 Subject: drm/amd/display: Add option to configure mapping policy for edp0 on dp1 [Why] We want flexibility to choose how pwrseq instance is mapped to eDP panel [How] Add configuration option to choose the pwrseq mapping policy. When enabled, allow fixed mapping between DIG inst to pwrseq inst. 
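To make the two policies concrete, here is a minimal sketch of the selection done in dcn31_panel_cntl_construct() (the real hunk follows below); the assumption that this panel's DIG resolves to power sequencer instance 1 is purely illustrative:

	/* illustrative wiring: the eng_id switch resolved pwrseq_inst to 1 */
	if (dc->config.support_edp0_on_dp1)
		panel_cntl->base.pwrseq_inst = 1;			/* follow the DIG instance */
	else
		panel_cntl->base.pwrseq_inst = panel_cntl->base.inst;	/* legacy: panels take pwrseq 0, 1, ... in order */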
Reviewed-by: Anthony Koo Acked-by: Hamza Mahfooz Signed-off-by: Lewis Huang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 46be4845ec28..db87f9cdd567 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -437,6 +437,7 @@ struct dc_config { bool usb4_bw_alloc_support; bool allow_0_dtb_clk; bool use_assr_psp_message; + bool support_edp0_on_dp1; }; enum visual_confirm { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 281be20b1a10..20c6fe48567f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -173,5 +173,12 @@ void dcn31_panel_cntl_construct( break; } - dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst; + if (dcn31_panel_cntl->base.ctx->dc->config.support_edp0_on_dp1) + //If supported, power sequencer mapping shall follow the DIG instance + dcn31_panel_cntl->base.pwrseq_inst = pwrseq_inst; + else + /* If not supported, pwrseq will be assigned in order, + * so first pwrseq will be assigned to first panel instance (legacy behavior) + */ + dcn31_panel_cntl->base.pwrseq_inst = dcn31_panel_cntl->base.inst; } -- cgit v1.2.3-70-g09d2 From af1c41858da107eba7e0cf7b23b08c1957014d70 Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Sat, 23 Mar 2024 12:02:54 -0600 Subject: drm/amd/display: Return max resolution supported by DWB mode_config's max width x height is 4096x2160 and is higher than DWB's max resolution 3840x2160 which is returned instead. Cc: stable@vger.kernel.org Reviewed-by: Harry Wentland Acked-by: Hamza Mahfooz Signed-off-by: Alex Hung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c index 16e72d623630..08c494a7a21b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder, static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - - return drm_add_modes_noedid(connector, dev->mode_config.max_width, - dev->mode_config.max_height); + /* Maximum resolution supported by DWB */ + return drm_add_modes_noedid(connector, 3840, 2160); } static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector, -- cgit v1.2.3-70-g09d2 From d6d6561f936bc8066537c3fa5fa352f2ea025d55 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Wed, 3 Apr 2024 17:28:44 +0800 Subject: drm/amdgpu: fix incorrect number of active RBs for gfx11 The RB bitmap should be global active RB bitmap & active RB bitmap based on active SA. 
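A small worked case (numbers illustrative) shows why the combine in the hunk below must be an AND: suppose the loop over active shader arrays produced active_rb_bitmap = 0x3 (both RBs of SA0), while global_active_rb_bitmap from the harvesting fuses is 0x1 (RB1 fused off).

	active_rb_bitmap |= global_active_rb_bitmap;	/* old: 0x3 | 0x1 = 0x3, harvested RB counted */
	active_rb_bitmap &= global_active_rb_bitmap;	/* new: 0x3 & 0x1 = 0x1, only RBs that are both
							 * in an active SA and not harvested remain */

so num_rbs becomes hweight32(0x1) = 1 rather than 2.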
Signed-off-by: Tim Huang Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index c11c6299711e..ae6a0d19e247 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa)); } - active_rb_bitmap |= global_active_rb_bitmap; + active_rb_bitmap &= global_active_rb_bitmap; adev->gfx.config.backend_enable_mask = active_rb_bitmap; adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); } -- cgit v1.2.3-70-g09d2 From 4a5b171299e59d51322f4c6bd376c5acbeca0a4a Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Fri, 22 Mar 2024 15:02:45 -0400 Subject: drm/amd/display: always reset ODM mode in context when adding first plane [why] In current implemenation ODM mode is only reset when the last plane is removed from dc state. For any dc validate we will always remove all current planes and add new planes. However when switching from no planes to 1 plane, ODM mode is not reset because no planes get removed. This has caused an issue where we kept ODM combine when it should have been remove when a plane is added. The change is to reset ODM mode when adding the first plane. Cc: stable@vger.kernel.org Reviewed-by: Alvin Lee Acked-by: Hamza Mahfooz Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index d1d326e9b9b6..4f9ef07d29ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -458,6 +458,15 @@ bool dc_state_add_plane( goto out; } + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) + /* ODM combine could prevent us from supporting more planes + * we will reset ODM slice count back to 1 when all planes have + * been removed to maximize the amount of planes supported when + * new planes are added. + */ + resource_update_pipes_for_stream_with_slice_count( + state, dc->current_state, dc->res_pool, stream, 1); + otg_master_pipe = resource_get_otg_master_for_stream( &state->res_ctx, stream); if (otg_master_pipe) -- cgit v1.2.3-70-g09d2 From 339126b5294468409bfb4cd49ec4745d1b7bf3bf Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Fri, 22 Mar 2024 15:29:56 -0400 Subject: drm/amd/display: fix an incorrect ODM policy assigned for subvp [why] When Subvp pipe's index is smaller than main pipe's index, the main pipe's ODM policy is not yet assigned. If we assign subvp pipe's ODM policy based on main pipe, we will assign uninitialized ODM policy. [how] Instead of copying main pipe's policy we copy the main pipe ODM policy logic. So it doesn't matter whether if main pipe's ODM policy is set, phantom pipe will always have the same policy because it running the same calcualtion to derive ODM policy. 
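For illustration of the ordering problem (indices hypothetical): if the phantom pipe sits at res_ctx index 1 while its SUBVP_MAIN pipe sits at index 3, the population loop visits the phantom before the main pipe's odm_combine_policy has been written into pipes[], so the old copy picked up an unassigned value. Deriving the policy from resource_get_odm_slice_count() on the main pipe's pipe_ctx, as the hunk below does, yields the same 2:1 / 4:1 / dal decision regardless of visit order.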
Cc: stable@vger.kernel.org Reviewed-by: Alvin Lee Acked-by: Hamza Mahfooz Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- .../amd/display/dc/resource/dcn32/dcn32_resource.c | 28 ++++++++++++---------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index e2bff9b9d55a..9aa39bd25be9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1824,6 +1824,7 @@ int dcn32_populate_dml_pipes_from_context( int num_subvp_main = 0; int num_subvp_phantom = 0; int num_subvp_none = 0; + int odm_slice_count; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); @@ -1852,7 +1853,7 @@ int dcn32_populate_dml_pipes_from_context( mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (mall_type == SUBVP_MAIN) { if (resource_is_pipe_type(pipe, OTG_MASTER)) - subvp_main_pipe_index = pipe_cnt; + subvp_main_pipe_index = i; } pipe_cnt++; } @@ -1878,22 +1879,23 @@ int dcn32_populate_dml_pipes_from_context( mall_type = dc_state_get_pipe_subvp_type(context, pipe); if (single_display_subvp && (mall_type == SUBVP_PHANTOM)) { if (subvp_main_pipe_index < 0) { + odm_slice_count = -1; ASSERT(0); } else { - pipes[pipe_cnt].pipe.dest.odm_combine_policy = - pipes[subvp_main_pipe_index].pipe.dest.odm_combine_policy; + odm_slice_count = resource_get_odm_slice_count(&res_ctx->pipe_ctx[subvp_main_pipe_index]); } } else { - switch (resource_get_odm_slice_count(pipe)) { - case 2: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; - break; - case 4: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; - break; - default: - pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; - } + odm_slice_count = resource_get_odm_slice_count(pipe); + } + switch (odm_slice_count) { + case 2: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; + break; + case 4: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_4to1; + break; + default: + pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; } } else { pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; -- cgit v1.2.3-70-g09d2 From 19407237e78f11c28b136aeacbc15442e5a48d80 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Tue, 2 Apr 2024 09:07:54 +0530 Subject: drm/amd/pm: Allow setting soft max frequency in VF Setting soft max frequency for MCLK is allowed in 1VF mode in SMUv13.0.6 SOCs. 
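The firmware gate added in the hunk below compares smc_fw_version against 0x556600, which is just the byte-packed form of the 85.102 release named in the code comment (treating the third byte as the patch/debug field is the usual packing assumption):

	(85 << 16) | (102 << 8) | 0x00 == 0x556600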
Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index ade8ec7a44b9..16179e170874 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -138,7 +138,7 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), @@ -1676,6 +1676,11 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu, if (clk_type == SMU_UCLK) { if (max == pstate_table->uclk_pstate.curr.max) return 0; + /* For VF, only allowed in FW versions 85.102 or greater */ + if (amdgpu_sriov_vf(adev) && + ((smu->smc_fw_version < 0x556600) || + (adev->flags & AMD_IS_APU))) + return -EOPNOTSUPP; /* Only max clock limiting is allowed for UCLK */ ret = smu_v13_0_set_soft_freq_limited_range( smu, SMU_UCLK, 0, max); -- cgit v1.2.3-70-g09d2 From 5324e2b205a2d7252356e95152507678d0b7da96 Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Sat, 16 Mar 2024 00:31:19 -0400 Subject: drm/amd/display: Add driver support for future FAMS versions [WHY&HOW] Changes to support future versions of FAMS. 
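Among the plumbing below, the dc_surface.c hunk adds a helper, dc_plane_get_pipe_mask(), that reports which HUBP instances a plane occupies in a given dc_state. A minimal usage sketch, with the mask value purely illustrative:

	/* query against the context being committed so the mask reflects
	 * the post-update pipe layout rather than the current one
	 */
	uint8_t mask = dc_plane_get_pipe_mask(context, plane_state);
	/* e.g. mask == 0x5 if the plane is split across HUBP0 and HUBP2 */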
Reviewed-by: Alvin Lee Acked-by: Hamza Mahfooz Signed-off-by: Dillon Varone Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 2 -- drivers/gpu/drm/amd/display/dc/core/dc.c | 5 ++++- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 3 ++- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 1 + drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 3 +++ drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 2 ++ drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 1 + drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 1 + .../gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 2 +- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 3 ++- 11 files changed, 37 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 645a8991a830..bc16db69a663 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -44,8 +44,6 @@ #include "bios_parser_common.h" -#include "dc.h" - #define THREE_PERCENT_OF_10000 300 #define LAST_RECORD_TYPE 0xff diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0ffa79d83bc7..145cdab92ca0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -36,6 +36,7 @@ #include "resource.h" #include "dc_state.h" #include "dc_state_priv.h" +#include "dc_plane_priv.h" #include "gpio_service_interface.h" #include "clk_mgr.h" @@ -3561,6 +3562,7 @@ static void commit_planes_for_stream_fast(struct dc *dc, int i, j; struct pipe_ctx *top_pipe_to_program = NULL; struct dc_stream_status *stream_status = NULL; + dc_exit_ips_for_hw_access(dc); dc_z10_restore(dc); @@ -3618,7 +3620,8 @@ static void commit_planes_for_stream_fast(struct dc *dc, context->block_sequence, &(context->block_sequence_steps), top_pipe_to_program, - stream_status); + stream_status, + context); hwss_execute_sequence(dc, context->block_sequence, context->block_sequence_steps); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index df0f23afc8bb..5c1d3017aefd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -560,7 +560,8 @@ void hwss_build_fast_sequence(struct dc *dc, struct block_sequence block_sequence[], unsigned int *num_steps, struct pipe_ctx *pipe_ctx, - struct dc_stream_status *stream_status) + struct dc_stream_status *stream_status, + struct dc_state *context) { struct dc_plane_state *plane = pipe_ctx->plane_state; struct dc_stream_state *stream = pipe_ctx->stream; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 4f9ef07d29ec..bf889bdd3925 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -915,3 +915,4 @@ struct dc_stream_state *dc_state_get_stream_from_id(const struct dc_state *state return stream; } + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index ea624e000ec0..067f6555cfdf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -60,6 +60,26 @@ void dc_plane_destruct(struct dc_plane_state *plane_state) // no more pointers to free within 
dc_plane_state } + +/* dc_state is passed in separately since it may differ from the current dc state accessible from plane_state e.g. + * if the driver is doing an update from an old context to a new one and the caller wants the pipe mask for the new + * context rather than the existing one + */ +uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state) +{ + uint8_t pipe_mask = 0; + int i; + + for (i = 0; i < plane_state->ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &dc_state->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->plane_state == plane_state && pipe_ctx->plane_res.hubp) + pipe_mask |= 1 << pipe_ctx->plane_res.hubp->inst; + } + + return pipe_mask; +} + /******************************************************************************* * Public functions ******************************************************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index db87f9cdd567..54534df73e83 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -44,6 +44,8 @@ #include "dml2/dml2_wrapper.h" +#include "dmub/inc/dmub_cmd.h" + struct abm_save_restore; /* forward declaration */ @@ -219,6 +221,7 @@ struct dc_dmub_caps { bool mclk_sw; bool subvp_psr; bool gecc_enable; + uint8_t fams_ver; }; struct dc_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 4c98cd066b8f..2293a92df3be 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -35,6 +35,7 @@ #include "resource.h" #include "clk_mgr.h" #include "dc_state_priv.h" +#include "dc_plane_priv.h" #define CTX dc_dmub_srv->ctx #define DC_LOGGER CTX->logger @@ -1593,3 +1594,4 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com return result; } + diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index c0a512a12531..2c5866211f60 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -35,6 +35,7 @@ struct pipe_ctx; struct dc_crtc_timing_adjust; struct dc_crtc_timing; struct dc_state; +struct dc_surface_update; struct dc_reg_helper_state { bool gather_in_progress; diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h index 9ee184c1df00..ab13335f1d01 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h @@ -30,5 +30,6 @@ void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state); void dc_plane_destruct(struct dc_plane_state *plane_state); +uint8_t dc_plane_get_pipe_mask(struct dc_state *dc_state, const struct dc_plane_state *plane_state); #endif /* _DC_PLANE_PRIV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index 9aea4a088652..9f1a86ddadb5 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ -956,7 +956,7 @@ void dcn32_init_hw(struct dc *dc) dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch_ver; if (dc->ctx->dmub_srv->dmub->fw_version < - DMUB_FW_VERSION(7, 0, 35)) { + DMUB_FW_VERSION(7, 0, 35)) { dc->debug.force_disable_subvp = true; dc->debug.disable_fpo_optimizations = true; } diff --git 
a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 659ce11ad446..7c339e7e7117 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -482,7 +482,8 @@ void hwss_build_fast_sequence(struct dc *dc, struct block_sequence block_sequence[], unsigned int *num_steps, struct pipe_ctx *pipe_ctx, - struct dc_stream_status *stream_status); + struct dc_stream_status *stream_status, + struct dc_state *context); void hwss_send_dmcub_cmd(union block_sequence_params *params); -- cgit v1.2.3-70-g09d2 From 24e9727b39caea8e9009d8f5bea582623182dda0 Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Thu, 21 Mar 2024 13:49:43 -0400 Subject: drm/amd/display: Do not recursively call manual trigger programming [WHY&HOW] We should not be recursively calling the manual trigger programming function when FAMS is not in use. Cc: stable@vger.kernel.org Reviewed-by: Alvin Lee Acked-by: Hamza Mahfooz Signed-off-by: Dillon Varone Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index f07a4c7e48bc..52eab8fccb7f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc) OTG_V_TOTAL_MAX_SEL, 1, OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); } } -- cgit v1.2.3-70-g09d2 From 27e718ac8b8194d13eee5738c4d3fd247736186e Mon Sep 17 00:00:00 2001 From: Fudongwang Date: Tue, 26 Mar 2024 16:03:16 +0800 Subject: drm/amd/display: fix disable otg wa logic in DCN316 [Why] Wrong logic cause screen corruption. [How] Port logic from DCN35/314. Cc: stable@vger.kernel.org Reviewed-by: Nicholas Kazlauskas Acked-by: Hamza Mahfooz Signed-off-by: Fudongwang Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index e4ed888f8403..20ca7afa9cb4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa( return display_count; } -static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? 
&context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || - dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc)) { if (disable) { - pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); } else pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); @@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn316_disable_otg_wa(clk_mgr_base, context, true); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn316_disable_otg_wa(clk_mgr_base, context, false); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } -- cgit v1.2.3-70-g09d2 From 66ef7b9162b89fa91ef03e5f72fc754516074f5e Mon Sep 17 00:00:00 2001 From: Allen Pan Date: Tue, 26 Mar 2024 10:41:17 -0400 Subject: drm/amd/display: expand the non standard link rate for testing [Why] 6.75 Gbps link rate training for DP_TEST_LINK_RATE_RATE_8 Reviewed-by: Charlene Liu Acked-by: Hamza Mahfooz Signed-off-by: Allen Pan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index b9f8eee5d2d1..519c3df78ee5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -137,8 +137,13 @@ enum dp_link_encoding { enum dp_test_link_rate { DP_TEST_LINK_RATE_RBR = 0x06, + DP_TEST_LINK_RATE_RATE_2 = 0x08, // Rate_2 - 2.16 Gbps/Lane + DP_TEST_LINK_RATE_RATE_3 = 0x09, // Rate_3 - 2.43 Gbps/Lane DP_TEST_LINK_RATE_HBR = 0x0A, + DP_TEST_LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2) - 3.24 Gbps/Lane + DP_TEST_LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane DP_TEST_LINK_RATE_HBR2 = 0x14, + DP_TEST_LINK_RATE_RATE_8 = 0x19, // Rate_8 - 6.75 Gbps/Lane DP_TEST_LINK_RATE_HBR3 = 0x1E, DP_TEST_LINK_RATE_UHBR10 = 0x01, DP_TEST_LINK_RATE_UHBR20 = 0x02, -- cgit v1.2.3-70-g09d2 From ded99dacb2d6e154349b442050a91c1651dae853 Mon Sep 17 00:00:00 2001 From: Parandhaman K Date: Fri, 22 Mar 2024 17:37:43 +0530 Subject: drm/amd/display: refactor vpg.h why and how: as part of cleanup, need to refactor vpg. It was improperly referenced as a dcn specfic part of link. the dcn agnostic code needed was ripped out and put into vpg.h, now in dc/inc/hw. 
Reviewed-by: Alvin Lee Acked-by: Hamza Mahfooz Signed-off-by: Parandhaman K Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h | 23 +--------- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h | 53 ++++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3 +- 4 files changed, 57 insertions(+), 23 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h index ed9a5549c389..466ba20b9c61 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h @@ -26,6 +26,7 @@ #ifndef __DAL_DCN30_VPG_H__ #define __DAL_DCN30_VPG_H__ +#include "vpg.h" #define DCN30_VPG_FROM_VPG(vpg)\ container_of(vpg, struct dcn30_vpg, base) @@ -132,28 +133,6 @@ struct dcn30_vpg_mask { VPG_DCN3_REG_FIELD_LIST(uint32_t); }; -struct vpg; - -struct vpg_funcs { - void (*update_generic_info_packet)( - struct vpg *vpg, - uint32_t packet_index, - const struct dc_info_packet *info_packet, - bool immediate_update); - - void (*vpg_poweron)( - struct vpg *vpg); - - void (*vpg_powerdown)( - struct vpg *vpg); -}; - -struct vpg { - const struct vpg_funcs *funcs; - struct dc_context *ctx; - int inst; -}; - struct dcn30_vpg { struct vpg base; const struct dcn30_vpg_registers *regs; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h index 0e76eabce441..609e58dbd056 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h @@ -26,6 +26,7 @@ #ifndef __DAL_DCN31_VPG_H__ #define __DAL_DCN31_VPG_H__ +#include "vpg.h" #define DCN31_VPG_FROM_VPG(vpg)\ container_of(vpg, struct dcn31_vpg, base) diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h new file mode 100644 index 000000000000..51da368f5c3e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h @@ -0,0 +1,53 @@ +/* + * Copyright 2024 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + */ + +#ifndef __DC_VPG_H__ +#define __DC_VPG_H__ + +struct dc_context; +struct dc_info_packet; + +struct vpg; + +struct vpg_funcs { + void (*update_generic_info_packet)( + struct vpg *vpg, + uint32_t packet_index, + const struct dc_info_packet *info_packet, + bool immediate_update); + + void (*vpg_poweron)( + struct vpg *vpg); + + void (*vpg_powerdown)( + struct vpg *vpg); +}; + +struct vpg { + const struct vpg_funcs *funcs; + struct dc_context *ctx; + int inst; +}; + +#endif /* DC_INC_VPG_H_ */ \ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index ce68476e69d5..938421e02770 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -55,6 +55,8 @@ #include "dccg.h" #include "clk_mgr.h" #include "atomfirmware.h" +#include "vpg.h" + #define DC_LOGGER \ dc_logger #define DC_LOGGER_INIT(logger) \ @@ -67,7 +69,6 @@ #define RETIMER_REDRIVER_INFO(...) \ DC_LOG_RETIMER_REDRIVER( \ __VA_ARGS__) -#include "dc/dcn30/dcn30_vpg.h" #define MAX_MTP_SLOT_COUNT 64 #define LINK_TRAINING_ATTEMPTS 4 -- cgit v1.2.3-70-g09d2 From 9850a1c44ca90b0e2865467c67b4de45a67b3410 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Tue, 19 Mar 2024 19:41:34 -0400 Subject: drm/amd/display: add dwb support to dml2 [why] dwb was not POR previosly. now need to enable dwb in dml2. Limitation: HW DML assumes only one DWB one set of watermark for all 4 watermark sets one stream has one DWB only. WB scaling dml input has one set of scaling tap. (no chroma so far) needs to follow up Reviewed-by: Chris Park Acked-by: Hamza Mahfooz Signed-off-by: Charlene Liu Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dml2/display_mode_core.c | 1 + .../drm/amd/display/dc/dml2/display_mode_core.h | 1 + .../amd/display/dc/dml2/display_mode_lib_defines.h | 2 + .../amd/display/dc/dml2/dml2_translation_helper.c | 45 ++++++++++++++- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 65 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h | 5 ++ drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 1 + 7 files changed, 119 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 470dca3950c3..6255101737b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -10214,6 +10214,7 @@ dml_get_var_func(fraction_of_urgent_bandwidth_imm_flip, dml_float_t, mode_lib->m dml_get_var_func(urgent_latency, dml_float_t, mode_lib->mp.UrgentLatency); dml_get_var_func(clk_dcf_deepsleep, dml_float_t, mode_lib->mp.DCFCLKDeepSleep); dml_get_var_func(wm_writeback_dram_clock_change, dml_float_t, mode_lib->mp.Watermark.WritebackDRAMClockChangeWatermark); +dml_get_var_func(wm_writeback_urgent, dml_float_t, mode_lib->mp.Watermark.WritebackUrgentWatermark); dml_get_var_func(stutter_efficiency, dml_float_t, mode_lib->mp.StutterEfficiency); dml_get_var_func(stutter_efficiency_no_vblank, dml_float_t, mode_lib->mp.StutterEfficiencyNotIncludingVBlank); dml_get_var_func(stutter_efficiency_z8, dml_float_t, mode_lib->mp.Z8StutterEfficiency); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h index 8452485684f5..3116b88e99dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h +++ 
b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h @@ -94,6 +94,7 @@ dml_get_var_decl(wm_usr_retraining, dml_float_t); dml_get_var_decl(urgent_latency, dml_float_t); dml_get_var_decl(wm_writeback_dram_clock_change, dml_float_t); +dml_get_var_decl(wm_writeback_urgent, dml_float_t); dml_get_var_decl(stutter_efficiency_no_vblank, dml_float_t); dml_get_var_decl(stutter_efficiency, dml_float_t); dml_get_var_decl(stutter_efficiency_z8, dml_float_t); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h index de63364be01d..14d389525296 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h @@ -41,6 +41,7 @@ #define DCN_DML__VM_PRESENT__1 1 #define DCN_DML__HOST_VM_PRESENT 1 #define DCN_DML__HOST_VM_PRESENT__1 1 +#define DCN_DML__DWB 1 #include "dml_depedencies.h" @@ -59,6 +60,7 @@ #define __DML_NUM_PLANES__ DCN_DML__NUM_PLANE #define __DML_NUM_CURSORS__ DCN_DML__NUM_CURSOR #define __DML_DPP_INVALID__ 0 +#define __DML_NUM_DMB__ DCN_DML__DWB #define __DML_PIPE_NO_PLANE__ 99 #define __DML_MAX_STATE_ARRAY_SIZE__ DCN_DML__NUM_PWR_STATE diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 151b480b3cea..f43a31cd2c8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -1061,7 +1061,46 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, plane_index = 0; } } - +static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out, + unsigned int location, const struct dc_stream_state *in) +{ + if (in->num_wb_info > 0) { + for (int i = 0; i < __DML_NUM_DMB__; i++) { + const struct dc_writeback_info *wb_info = &in->writeback_info[i]; + /*current dml support 1 dwb per stream, limitation*/ + if (wb_info->wb_enabled) { + out->WritebackEnable[location] = wb_info->wb_enabled; + out->ActiveWritebacksPerSurface[location] = wb_info->dwb_params.cnv_params.src_width; + out->WritebackDestinationWidth[location] = wb_info->dwb_params.dest_width; + out->WritebackDestinationHeight[location] = wb_info->dwb_params.dest_height; + + out->WritebackSourceWidth[location] = wb_info->dwb_params.cnv_params.crop_en ? + wb_info->dwb_params.cnv_params.crop_width : + wb_info->dwb_params.cnv_params.src_width; + + out->WritebackSourceHeight[location] = wb_info->dwb_params.cnv_params.crop_en ? + wb_info->dwb_params.cnv_params.crop_height : + wb_info->dwb_params.cnv_params.src_height; + /*current design does not have chroma scaling, need to follow up*/ + out->WritebackHTaps[location] = wb_info->dwb_params.scaler_taps.h_taps > 0 ? + wb_info->dwb_params.scaler_taps.h_taps : 1; + out->WritebackVTaps[location] = wb_info->dwb_params.scaler_taps.v_taps > 0 ? + wb_info->dwb_params.scaler_taps.v_taps : 1; + + out->WritebackHRatio[location] = wb_info->dwb_params.cnv_params.crop_en ? + (double)wb_info->dwb_params.cnv_params.crop_width / + (double)wb_info->dwb_params.dest_width : + (double)wb_info->dwb_params.cnv_params.src_width / + (double)wb_info->dwb_params.dest_width; + out->WritebackVRatio[location] = wb_info->dwb_params.cnv_params.crop_en ? 
+ (double)wb_info->dwb_params.cnv_params.crop_height / + (double)wb_info->dwb_params.dest_height : + (double)wb_info->dwb_params.cnv_params.src_height / + (double)wb_info->dwb_params.dest_height; + } + } + } +} void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg) { int i = 0, j = 0, k = 0; @@ -1106,6 +1145,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]); populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context); + /*Call site for populate_dml_writeback_cfg_from_stream_state*/ + populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback, + disp_cfg_stream_location, context->streams[i]); + switch (context->streams[i]->debug.force_odm_combine_segments) { case 2: dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c index 81b4e08f8098..0f8b3336e26d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c @@ -404,6 +404,71 @@ void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display watermark->cstate_pstate.cstate_exit_z8_ns = dml_get_wm_z8_stutter(dml_core_ctx) * 1000; } +unsigned int dml2_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark) +{ + unsigned int time_per_byte = 0; + unsigned int total_free_entry = 0xb40; + unsigned int buf_lh_capability; + unsigned int max_scaled_time; + + if (mode == PACKED_444) /* packed mode 32 bpp */ + time_per_byte = time_per_pixel/4; + else if (mode == PACKED_444_FP16) /* packed mode 64 bpp */ + time_per_byte = time_per_pixel/8; + + if (time_per_byte == 0) + time_per_byte = 1; + + buf_lh_capability = (total_free_entry*time_per_byte*32) >> 6; /* time_per_byte is in u6.6*/ + max_scaled_time = buf_lh_capability - urgent_watermark; + return max_scaled_time; +} + +void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx) +{ + int i, j = 0;; + struct mcif_arb_params *wb_arb_params = NULL; + struct dcn_bw_writeback *bw_writeback = NULL; + enum mmhubbub_wbif_mode wbif_mode = PACKED_444_FP16; /*for now*/ + + if (context->stream_count != 0) { + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->num_wb_info != 0) + j++; + } + } + if (j == 0) /*no dwb */ + return; + for (i = 0; i < __DML_NUM_DMB__; i++) { + bw_writeback = &context->bw_ctx.bw.dcn.bw_writeback; + wb_arb_params = &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[i]; + + for (j = 0 ; j < 4; j++) { + /*current dml only has one set of watermark, need to follow up*/ + bw_writeback->mcif_wb_arb[i].cli_watermark[j] = + dml_get_wm_writeback_urgent(dml_core_ctx) * 1000; + bw_writeback->mcif_wb_arb[i].pstate_watermark[j] = + dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000; + } + if (context->res_ctx.pipe_ctx[i].stream->phy_pix_clk != 0) { + /* time_per_pixel should be in u6.6 format */ + bw_writeback->mcif_wb_arb[i].time_per_pixel = + (1000000 << 6) / context->res_ctx.pipe_ctx[i].stream->phy_pix_clk; + } + bw_writeback->mcif_wb_arb[i].slice_lines = 32; + bw_writeback->mcif_wb_arb[i].arbitration_slice = 2; + 
bw_writeback->mcif_wb_arb[i].max_scaled_time = + dml2_calc_max_scaled_time(wb_arb_params->time_per_pixel, + wbif_mode, wb_arb_params->cli_watermark[0]); + /*not required any more*/ + bw_writeback->mcif_wb_arb[i].dram_speed_change_duration = + dml_get_wm_writeback_dram_clock_change(dml_core_ctx) * 1000; + + } +} void dml2_initialize_det_scratch(struct dml2_context *in_ctx) { int i; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h index 5842d6d3c4b6..04fcfe637119 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h @@ -40,9 +40,14 @@ void dml2_util_copy_dml_output(struct dml_output_cfg_st *dml_output_array, unsig unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, enum dml_output_encoder_class encoder, bool dsc_enabled); void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_state *context); void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx); +void dml2_extract_writeback_wm(struct dc_state *context, struct display_mode_lib_st *dml_core_ctx); int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id); bool is_dtbclk_required(const struct dc *dc, struct dc_state *context); bool dml2_is_stereo_timing(const struct dc_stream_state *stream); +unsigned int dml2_calc_max_scaled_time( + unsigned int time_per_pixel, + enum mmhubbub_wbif_mode mode, + unsigned int urgent_watermark); /* * dml2_dc_construct_pipes - This function will determine if we need additional pipes based diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index abf946f089b3..9412d5384a41 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -640,6 +640,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx); memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c)); dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx); + dml2_extract_writeback_wm(context, &dml2->v20.dml_core_ctx); //copy for deciding zstate use context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod; -- cgit v1.2.3-70-g09d2 From 1abfb9f9c767ca4c98c12ba2754abfe3ecf5ce8c Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 12 Mar 2024 11:55:52 -0400 Subject: drm/amd/display: Program VSC SDP colorimetry for all DP sinks >= 1.4 In order for display colorimetry to work correctly on DP displays we need to send the VSC SDP packet. We should only do so for panels with DPCD revision greater or equal to 1.4 as older receivers might have problems with it. 
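As an illustrative aside (not part of the patch), the gate being added keys off the raw DPCD_REV byte, which encodes the revision as major nibble / minor nibble, so 0x14 corresponds to DPCD 1.4. A minimal standalone sketch of the check, with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

#define DPCD_REV_1_4 0x14	/* DPCD_REV: major revision in the high nibble, minor in the low */

/* Hypothetical helper; the real check lives in create_stream_for_sink() and
 * reads the link's cached dpcd_caps. */
static bool use_vsc_sdp_for_colorimetry(uint8_t dpcd_rev, bool colorimetry_supported)
{
	/* require both a new enough DPCD revision and the DPRX feature bit,
	 * since pre-1.4 receivers may mishandle the VSC SDP */
	return dpcd_rev >= DPCD_REV_1_4 && colorimetry_supported;
}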
Cc: stable@vger.kernel.org Cc: Joshua Ashton Cc: Xaver Hugl Cc: Melissa Wen Cc: Agustin Gutierrez Reviewed-by: Agustin Gutierrez Acked-by: Hamza Mahfooz Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e80a75312cf7..7b016a2125e0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6326,7 +6326,9 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet @@ -6336,7 +6338,8 @@ create_stream_for_sink(struct drm_connector *connector, stream->use_vsc_sdp_for_colorimetry = aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; } else { - if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) + if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) stream->use_vsc_sdp_for_colorimetry = true; } if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) -- cgit v1.2.3-70-g09d2 From 038e2e2e0150f1649d40f7d915561cdf9e4dd5bf Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 21 Mar 2024 11:13:38 -0400 Subject: drm/amd/display: Set VSC SDP Colorimetry same way for MST and SST The previous check for the is_vsc_sdp_colorimetry_supported flag for MST sink signals did nothing. Simplify the code and use the same check for MST and SST. 
Cc: stable@vger.kernel.org Reviewed-by: Agustin Gutierrez Acked-by: Hamza Mahfooz Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7b016a2125e0..4d9a76446df8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6333,15 +6333,9 @@ create_stream_for_sink(struct drm_connector *connector, // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // - stream->use_vsc_sdp_for_colorimetry = false; - if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - stream->use_vsc_sdp_for_colorimetry = - aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; - } else { - if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && - stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - stream->use_vsc_sdp_for_colorimetry = true; - } + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; + if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); -- cgit v1.2.3-70-g09d2 From 364b1c1de6de36c1b28690265c904c682aecc266 Mon Sep 17 00:00:00 2001 From: Zhongwei Date: Wed, 27 Mar 2024 13:49:40 +0800 Subject: drm/amd/display: Adjust dprefclk by down spread percentage. [Why] OLED panels show no display for large vtotal timings. [How] Check if ss is enabled and read from lut for spread spectrum percentage. Adjust dprefclk as required. DP_DTO adjustment is for edp only. 
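As a rough illustration of the magnitude involved (a hedged sketch, not the driver's dce_adjust_dp_ref_freq_for_ss(), whose fixed-point math may differ): the lookup table added below stores the spread in units of 1/ss_divider percent, so an entry of 375 with a divider of 1000 means 0.375% down-spread, and the DP DTO reference clock is reduced by that fraction of the nominal dprefclk.

/* Hypothetical helper showing only the scale of the correction. */
static unsigned int downspread_adjusted_khz(unsigned int nominal_khz,
					    unsigned int ss_percentage,
					    unsigned int ss_divider)
{
	unsigned long long delta = (unsigned long long)nominal_khz * ss_percentage;

	/* ss_percentage is expressed in units of (1 / ss_divider) percent */
	return nominal_khz - (unsigned int)(delta / (100ULL * ss_divider));
}

/* e.g. a nominal 600000 kHz dprefclk with ss_percentage = 375 and
 * ss_divider = 1000 comes out as 600000 - 2250 = 597750 kHz */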
Cc: stable@vger.kernel.org Reviewed-by: Nicholas Kazlauskas Acked-by: Hamza Mahfooz Signed-off-by: Zhongwei Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 50 ++++++++++++++++++++++ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 8 ++-- 2 files changed, 54 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 8efde1cfb49a..6c9b4e6491a5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -73,6 +73,12 @@ #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +#define regCLK5_0_CLK5_spll_field_8 0x464b +#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0 + +#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd +#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L + #define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 #define REG(reg_name) \ @@ -412,6 +418,17 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs { } +static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t ssc_enable; + + REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable); + + return ssc_enable == 1; +} + static void init_clk_states(struct clk_mgr *clk_mgr) { struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); @@ -429,7 +446,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr) void dcn35_init_clocks(struct clk_mgr *clk_mgr) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); init_clk_states(clk_mgr); + + // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk + if (dcn35_is_spll_ssc_enabled(clk_mgr)) + clk_mgr->dp_dto_source_clock_in_khz = + dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz); + else + clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; + } static struct clk_bw_params dcn35_bw_params = { .vram_type = Ddr4MemType, @@ -518,6 +544,28 @@ static DpmClocks_t_dcn35 dummy_clocks; static struct dcn35_watermarks dummy_wms = { 0 }; +static struct dcn35_ss_info_table ss_info_table = { + .ss_divider = 1000, + .ss_percentage = {0, 0, 375, 375, 375} +}; + +static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) +{ + struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t clock_source; + + REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); + // If it's DFS mode, clock_source is 0. 
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { + clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; + + if (clk_mgr->dprefclk_ss_percentage != 0) { + clk_mgr->ss_on_dprefclk = true; + clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; + } + } +} + static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table) { int i, num_valid_sets; @@ -1024,6 +1072,8 @@ void dcn35_clk_mgr_construct( dce_clock_read_ss_info(&clk_mgr->base); /*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/ + dcn35_read_ss_info_from_lut(&clk_mgr->base); + clk_mgr->base.base.bw_params = &dcn35_bw_params; if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 970644b695cd..b5e0289d2fe8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk( struct bp_pixel_clock_parameters bp_pc_params = {0}; enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; - if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0) + // Apply ssed(spread spectrum) dpref clock for edp only. + if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 + && pix_clk_params->signal_type == SIGNAL_TYPE_EDP + && encoding == DP_8b_10b_ENCODING) dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz; // For these signal types Driver to program DP_DTO without calling VBIOS Command table if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) { @@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz( unsigned int modulo_hz = 0; unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz; - if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0) - dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz; - if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) { clock_hz = REG_READ(PHASE[inst]); -- cgit v1.2.3-70-g09d2 From 3ca7317809fc247336e6a602e855be76dfee9a77 Mon Sep 17 00:00:00 2001 From: George Shen Date: Wed, 27 Mar 2024 18:05:51 -0400 Subject: drm/amd/display: Rebuild test pattern params for DP_TEST_PATTERN_VIDEO_MODE [Why] For video mode test pattern (i.e. test pattern disable), the call to rebuild test pattern params for the pipe is skipped. This causes dynamic disablement of test pattern to not work, as the test_pattern_params of the pipe will not be updated and retain the values of the previously enabled test pattern. [How] Rebuild test pattern params even when test pattern is video mode, allowing the pipe to have updated test_pattern_params values. 
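To show the failure mode in isolation (simplified types, not DC's structures): when the rebuild is gated on the requested pattern not being video mode, a disable request never refreshes the cached parameters, so they keep whatever the last enabled pattern wrote.

enum test_pattern { TEST_PATTERN_VIDEO_MODE, TEST_PATTERN_COLOR_RAMP };

struct pipe_params { enum test_pattern type; };

static void request_pattern(struct pipe_params *cached, enum test_pattern requested,
			    int skip_rebuild_for_video_mode)
{
	if (!skip_rebuild_for_video_mode || requested != TEST_PATTERN_VIDEO_MODE)
		cached->type = requested;	/* rebuild test pattern params */
	/* with the old gating, a TEST_PATTERN_VIDEO_MODE (disable) request falls
	 * through and cached->type still reports the previous pattern */
}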
Reviewed-by: Nevenko Stupar Reviewed-by: Chaitanya Dhere Acked-by: Hamza Mahfooz Signed-off-by: George Shen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 ++-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d48a181d2249..2633e481234f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2329,8 +2329,8 @@ static bool update_pipe_params_after_odm_slice_count_change( if (pool->funcs->build_pipe_pix_clk_params) pool->funcs->build_pipe_pix_clk_params(otg_master); - if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) - resource_build_test_pattern_params(&context->res_ctx, otg_master); + resource_build_test_pattern_params(&context->res_ctx, otg_master); + return result; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 9e6498d2439d..5be976fa44f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -2120,7 +2120,7 @@ static bool dcn32_apply_merge_split_flags_helper( struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx, context->streams[i]); - if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) + if (otg_master) resource_build_test_pattern_params(&context->res_ctx, otg_master); } } -- cgit v1.2.3-70-g09d2 From dc21cf28a1c90cf96446a66d03b92a77a23a1ed8 Mon Sep 17 00:00:00 2001 From: Qili Lu Date: Thu, 28 Mar 2024 20:19:56 -0400 Subject: Revert "drm/amd/display: Enabling urgent latency adjustment for DCN35" This reverts commit b72a7e0fd0f8d235f885f84642e5c71f4e058c4b. It causes a dead loop in dml_prefetch_check. 
Reviewed-by: Nicholas Kazlauskas Acked-by: Hamza Mahfooz Signed-off-by: Qili Lu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 60f251cf973b..add169162f2a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -195,9 +195,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = { .dcn_downspread_percent = 0.5, .gpuvm_min_page_size_bytes = 4096, .hostvm_min_page_size_bytes = 4096, - .do_urgent_latency_adjustment = 1, + .do_urgent_latency_adjustment = 0, .urgent_latency_adjustment_fabric_clock_component_us = 0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 3000, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, }; void dcn35_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) -- cgit v1.2.3-70-g09d2 From 2fa5d653bd473a8ebec64fdf363bffb4075176cd Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 30 Mar 2024 16:51:03 -0400 Subject: drm/amd/display: [FW Promotion] Release 0.0.212.0 - Add boot option to change the ONO powerup flow, impacting the order of power domains to power up or down first Acked-by: Hamza Mahfooz Signed-off-by: Anthony Koo Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index d0b581b8331c..944f14307517 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -662,6 +662,7 @@ union dmub_fw_boot_options { uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */ uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/ uint32_t ips_disable: 3; /* options to disable ips support*/ + uint32_t ips_sequential_ono: 1; /**< 1 to enable sequential ONO IPS sequence */ uint32_t reserved : 9; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ -- cgit v1.2.3-70-g09d2 From 08f7c681177b209a02740aa2c58d11fcf83d6e4d Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 08:38:37 -0600 Subject: drm/amd/display: Add V_TOTAL_REGS to dcn10 DCN10 OPTC is used by other DCNs, and in some cases it might be useful to have V_TOTAL_REGS available. This commit add V_TOTAL_REGS as part of the TG field. 
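As a self-contained sketch of the field-list macro pattern this hooks into (the extra field names are made up, not taken from an actual DCN header): V_TOTAL_REGS(type) expands to nothing by default, and an OPTC header for another DCN generation can redefine it so its extra fields are spliced into every structure built from the list.

#include <stdint.h>

#define V_TOTAL_REGS(type)		/* empty by default, as in dcn10 */

#define TG_FIELD_LIST_EXAMPLE(type)	\
	type OTG_V_SYNC_MODE;		\
	V_TOTAL_REGS(type)		\
	type OTG_OUT_MUX;

/* a variant header would redefine V_TOTAL_REGS(type) to add e.g.
 * "type OTG_V_TOTAL_MID;" before expanding the list */
struct example_optc_shift {
	TG_FIELD_LIST_EXAMPLE(uint8_t)
};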
Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index 8a45d26b7531..2f3bd7648ba7 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -517,12 +517,15 @@ struct dcn_optc_registers { type MANUAL_FLOW_CONTROL;\ type MANUAL_FLOW_CONTROL_SEL; +#define V_TOTAL_REGS(type) + #define TG_REG_FIELD_LIST(type) \ TG_REG_FIELD_LIST_DCN1_0(type)\ type OTG_V_SYNC_MODE;\ type OTG_DRR_TRIGGER_WINDOW_START_X;\ type OTG_DRR_TRIGGER_WINDOW_END_X;\ type OTG_DRR_V_TOTAL_CHANGE_LIMIT;\ + V_TOTAL_REGS(type)\ type OTG_OUT_MUX;\ type OTG_M_CONST_DTO_PHASE;\ type OTG_M_CONST_DTO_MODULO;\ -- cgit v1.2.3-70-g09d2 From af068dc28dea24560b1573808fdf413d523e9b85 Mon Sep 17 00:00:00 2001 From: Xiang Yang Date: Sun, 7 Apr 2024 17:01:31 +0800 Subject: drm/amd/display: delete the redundant initialization in dcn3_51_soc the dram_clock_change_latency_us in dcn3_51_soc is initialized twice, so delete one of them. Acked-by: Alex Deucher Signed-off-by: Xiang Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 5abf120ff075..e4f333d4fb54 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -237,7 +237,6 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = { .urgent_latency_adjustment_fabric_clock_component_us = 0, .urgent_latency_adjustment_fabric_clock_reference_mhz = 0, .num_chans = 4, - .dram_clock_change_latency_us = 11.72, .dispclk_dppclk_vco_speed_mhz = 2400.0, }; -- cgit v1.2.3-70-g09d2 From 526b184e888371dd3fdb131961461a2f30cd0ae4 Mon Sep 17 00:00:00 2001 From: Yifan Zhang Date: Sun, 7 Apr 2024 22:01:35 +0800 Subject: drm/amdgpu: differentiate external rev id for gfx 11.5.0 This patch to differentiate external rev id for gfx 11.5.0. Signed-off-by: Yifan Zhang Reviewed-by: Tim Huang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc21.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index abe319b0f063..43ca63fe85ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -720,7 +720,10 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_GFX_PG; - adev->external_rev_id = adev->rev_id + 0x1; + if (adev->rev_id == 0) + adev->external_rev_id = 0x1; + else + adev->external_rev_id = adev->rev_id + 0x10; break; case IP_VERSION(11, 5, 1): adev->cg_flags = -- cgit v1.2.3-70-g09d2 From 8bdfb4ea95ca738d33ef71376c21eba20130f2eb Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Tue, 26 Mar 2024 15:32:46 -0400 Subject: drm/amdkfd: Reset GPU on queue preemption failure Currently, with F32 HWS GPU reset is only when unmap queue fails. However, if compute queue doesn't repond to preemption request in time unmap will return without any error. In this case, only preemption error is logged and Reset is not triggered. 
Call GPU reset in this case also. Reviewed-by: Alex Deucher Signed-off-by: Harish Kasiviswanathan Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f4d395e38683..0b655555e167 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2001,6 +2001,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n"); while (halt_if_hws_hang) schedule(); + kfd_hws_hang(dqm); return -ETIME; } -- cgit v1.2.3-70-g09d2 From 65ff8092e4802f96d87d3d7cde146961f5228265 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sat, 23 Mar 2024 20:46:53 -0400 Subject: drm/amdgpu: always force full reset for SOC21 There are cases where soft reset seems to succeed, but does not, so always use mode1/2 for now. Reviewed-by: Harish Kasiviswanathan Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/soc21.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 581a3bd11481..8526282f4da1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev) { switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(11, 0, 0): - return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): - return false; default: return true; } -- cgit v1.2.3-70-g09d2 From 4b18a91faf1752f9bd69a4ed3aed2c8f6e5b0528 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 21 Mar 2024 17:46:36 +0530 Subject: drm/amdgpu: Refine IB schedule error logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Downgrade to debug information when IBs are skipped. Also, use dev_* to identify the device. Signed-off-by: Lijo Lazar Reviewed-by: Christian König Reviewed-by: Asad Kamal Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 4b3000c21ef2..e4742b65032d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) dma_fence_set_error(finished, -ECANCELED); if (finished->error < 0) { - DRM_INFO("Skip scheduling IBs!\n"); + dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)", + ring->name); } else { r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, &fence); if (r) - DRM_ERROR("Error scheduling IBs (%d)\n", r); + dev_err(adev->dev, + "Error scheduling IBs (%d) in ring(%s)", r, + ring->name); } job->job_run_counter++; -- cgit v1.2.3-70-g09d2 From 0f1bbcc2bab25d5fb2dfb1ee3e08131437690d3d Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Mon, 25 Mar 2024 13:24:31 +0800 Subject: drm/amdgpu/umsch: reinitialize write pointer in hw init Otherwise the old one will be used during GPU reset. That's not expected. 
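A minimal sketch of why this matters (simplified fields, not the driver's ring structures): across a GPU reset the hardware ring is reprogrammed from scratch, so a software write pointer left over from before the reset would make the next doorbell advertise packets that were never written.

struct ring_sw_state {
	unsigned int wptr;		/* software copy, pushed to HW via the doorbell */
	unsigned int hw_rb_wptr;	/* ring register value set up by hw init */
};

static void ring_hw_init(struct ring_sw_state *ring)
{
	ring->hw_rb_wptr = 0;	/* hardware side starts empty after init/reset */
	ring->wptr = 0;		/* keep the software copy in step; the one-line fix */
}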
Signed-off-by: Lang Yu Reviewed-by: Feifei Xu Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c index 84368cf1e175..bd57896ab85d 100644 --- a/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c @@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch) WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size); + ring->wptr = 0; + data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE); data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK); WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data); -- cgit v1.2.3-70-g09d2 From 8b2be55f4d6c1099d7f629b0ed7535a5be788c83 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 14 Feb 2024 17:55:54 +0530 Subject: drm/amdgpu: Reset dGPU if suspend got aborted For SOC21 ASICs, there is an issue in re-enabling PM features if a suspend got aborted. In such cases, reset the device during resume phase. This is a workaround till a proper solution is finalized. Signed-off-by: Lijo Lazar Reviewed-by: Alex Deucher Reviewed-by: Yang Wang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/soc21.c | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 8526282f4da1..abe319b0f063 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -867,10 +867,35 @@ static int soc21_common_suspend(void *handle) return soc21_common_hw_fini(adev); } +static bool soc21_need_reset_on_resume(struct amdgpu_device *adev) +{ + u32 sol_reg1, sol_reg2; + + /* Will reset for the following suspend abort cases. + * 1) Only reset dGPU side. + * 2) S3 suspend got aborted and TOS is active. 
+ */ + if (!(adev->flags & AMD_IS_APU) && adev->in_s3 && + !adev->suspend_complete) { + sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + msleep(100); + sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81); + + return (sol_reg1 != sol_reg2); + } + + return false; +} + static int soc21_common_resume(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (soc21_need_reset_on_resume(adev)) { + dev_info(adev->dev, "S3 suspend aborted, resetting..."); + soc21_asic_reset(adev); + } + return soc21_common_hw_init(adev); } -- cgit v1.2.3-70-g09d2 From d4396924c3d44f34d0643f650e70892e07f3677f Mon Sep 17 00:00:00 2001 From: Li Ma Date: Thu, 28 Mar 2024 10:55:10 +0800 Subject: drm/amd/display: add DCN 351 version for microcode load There is a new DCN veriosn 3.5.1 need to load Signed-off-by: Li Ma Reviewed-by: Yifan Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 71d2d44681b2..173438fc7537 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB); +#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB); + /* Number of bytes in PSP header for firmware. */ #define PSP_HEADER_BYTES 0x100 @@ -4820,9 +4823,11 @@ static int dm_init_microcode(struct amdgpu_device *adev) fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; break; case IP_VERSION(3, 5, 0): - case IP_VERSION(3, 5, 1): fw_name_dmub = FIRMWARE_DCN_35_DMUB; break; + case IP_VERSION(3, 5, 1): + fw_name_dmub = FIRMWARE_DCN_351_DMUB; + break; default: /* ASIC doesn't support DMUB. */ return 0; -- cgit v1.2.3-70-g09d2 From 31729e8c21ecfd671458e02b6511eb68c2225113 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Wed, 27 Mar 2024 13:10:37 +0800 Subject: drm/amd/pm: fixes a random hang in S4 for SMU v13.0.4/11 While doing multiple S4 stress tests, GC/RLC/PMFW get into an invalid state resulting into hard hangs. Adding a GFX reset as workaround just before sending the MP1_UNLOAD message avoids this failure. Signed-off-by: Tim Huang Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c index bb98156b2fa1..949131bd1ecb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c @@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en) struct amdgpu_device *adev = smu->adev; int ret = 0; - if (!en && !adev->in_s0ix) + if (!en && !adev->in_s0ix) { + /* Adds a GFX reset as workaround just before sending the + * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering + * an invalid state. 
+ */ + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, + SMU_RESET_MODE_2, NULL); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL); + } return ret; } -- cgit v1.2.3-70-g09d2 From a3a4c0b12346a2493b41c8790d85141844a04e28 Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Fri, 22 Mar 2024 12:25:16 -0400 Subject: drm/amdgpu : Add mes_log_enable to control mes log feature The MES log might slow down the performance for extra step of log the data, disable it by default and introduce a parameter can enable it when necessary Signed-off-by: shaoyunl Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 7 +++++-- 4 files changed, 20 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 9c62552bec34..b3b84647207e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring; extern int amdgpu_mcbp; extern int amdgpu_discovery; extern int amdgpu_mes; +extern int amdgpu_mes_log_enable; extern int amdgpu_mes_kiq; extern int amdgpu_noretry; extern int amdgpu_force_asic_type; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 80b9642f2bc4..e4277298cf1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1; int amdgpu_mcbp = -1; int amdgpu_discovery = -1; int amdgpu_mes; +int amdgpu_mes_log_enable = 0; int amdgpu_mes_kiq; int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; @@ -667,6 +668,15 @@ MODULE_PARM_DESC(mes, "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)"); module_param_named(mes, amdgpu_mes, int, 0444); +/** + * DOC: mes_log_enable (int) + * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log. + * (0 = disabled (default), 1 = enabled) + */ +MODULE_PARM_DESC(mes_log_enable, + "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)"); +module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444); + /** * DOC: mes_kiq (int) * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index a98e03e0a51f..cfd613ea39b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -102,6 +102,9 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) { int r; + if (!amdgpu_mes_log_enable) + return 0; + r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->mes.event_log_gpu_obj, @@ -1565,7 +1568,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev) #if defined(CONFIG_DEBUG_FS) struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; - if (adev->enable_mes) + if (adev->enable_mes && amdgpu_mes_log_enable) debugfs_create_file("amdgpu_mes_event_log", 0444, root, adev, &amdgpu_debugfs_mes_event_log_fops); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 072c478665ad..63f281a9984d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes) mes_set_hw_res_pkt.enable_reg_active_poll = 1; mes_set_hw_res_pkt.enable_level_process_quantum_check = 1; mes_set_hw_res_pkt.oversubscription_timer = 50; - mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; - mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr; + if (amdgpu_mes_log_enable) { + mes_set_hw_res_pkt.enable_mes_event_int_logging = 1; + mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = + mes->event_log_gpu_addr; + } return mes_v11_0_submit_pkt_and_poll_completion(mes, &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt), -- cgit v1.2.3-70-g09d2 From 5b0cd091d905ee9da0a3ecdf06b9cbdd17ba711d Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Fri, 22 Mar 2024 12:44:55 -0400 Subject: drm/amdgpu : Increase the mes log buffer size as per new MES FW version From MES version 0x54, the log entry increased and require the log buffer size to be increased. 
The 16k is maximum size agreed Signed-off-by: shaoyunl Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 5 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 1 + 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index cfd613ea39b3..a00cf4756ad0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -105,7 +105,7 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev) if (!amdgpu_mes_log_enable) return 0; - r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, + r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->mes.event_log_gpu_obj, &adev->mes.event_log_gpu_addr, @@ -1552,12 +1552,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused) uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr); seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4, - mem, PAGE_SIZE, false); + mem, AMDGPU_MES_LOG_BUFFER_SIZE, false); return 0; } - DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index 7d4f93fea937..4c8fc3117ef8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level { #define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */ #define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */ +#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximu log buffer size for MES */ struct amdgpu_mes_funcs; -- cgit v1.2.3-70-g09d2 From c5b1ccff26950d50bf2043cb2af9bafb1f08bbaf Mon Sep 17 00:00:00 2001 From: lima1002 Date: Mon, 29 Jan 2024 20:17:54 +0800 Subject: drm/amd/swsmu: Update smu v14.0.0 headers to be 14.0.1 compatible update ppsmc.h pmfw.h and driver_if.h for smu v14_0_1 Reviewed-by: Alex Deucher Signed-off-by: lima1002 Signed-off-by: Alex Deucher --- .../pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h | 33 +- .../amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h | 55 +++- .../amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h | 18 +- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 1 + drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 2 +- .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c | 347 +++++++++++++++++++-- 6 files changed, 413 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 5bb7a63c0602..97522c085258 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -144,6 +144,37 @@ typedef struct { uint32_t MaxGfxClk; } DpmClocks_t; +//Freq in MHz +//Voltage in milli volts with 2 fractional bits +typedef struct { + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks0[NUM_VCN_DPM_LEVELS]; + uint32_t VClocks1[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks0[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks1[NUM_VCN_DPM_LEVELS]; + uint32_t VPEClocks[NUM_VPE_DPM_LEVELS]; + uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS]; + uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + 
MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS]; + + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t Vcn0ClkLevelsEnabled; //Applies to both Vclk0 and Dclk0 + uint8_t Vcn1ClkLevelsEnabled; //Applies to both Vclk1 and Dclk1 + uint8_t VpeClkLevelsEnabled; + uint8_t NumMemPstatesEnabled; + uint8_t NumFclkLevelsEnabled; + uint8_t spare; + + uint32_t MinGfxClk; + uint32_t MaxGfxClk; +} DpmClocks_t_v14_0_1; + typedef struct { uint16_t CoreFrequency[16]; //Target core frequency [MHz] uint16_t CorePower[16]; //CAC calculated core power [mW] @@ -224,7 +255,7 @@ typedef enum { #define TABLE_CUSTOM_DPM 2 // Called by Driver #define TABLE_BIOS_GPIO_CONFIG 3 // Called by BIOS #define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS -#define TABLE_SPARE0 5 // Unused +#define TABLE_MOMENTARY_PM 5 // Called by Tools #define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log #define TABLE_SMU_METRICS 7 // Called by Driver and SMF/PMF #define TABLE_COUNT 8 diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h index 356e0f57a426..ddb625860083 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h @@ -42,7 +42,7 @@ #define FEATURE_EDC_BIT 7 #define FEATURE_PLL_POWER_DOWN_BIT 8 #define FEATURE_VDDOFF_BIT 9 -#define FEATURE_VCN_DPM_BIT 10 +#define FEATURE_VCN_DPM_BIT 10 /* this is for both VCN0 and VCN1 */ #define FEATURE_DS_MPM_BIT 11 #define FEATURE_FCLK_DPM_BIT 12 #define FEATURE_SOCCLK_DPM_BIT 13 @@ -56,9 +56,9 @@ #define FEATURE_DS_GFXCLK_BIT 21 #define FEATURE_DS_SOCCLK_BIT 22 #define FEATURE_DS_LCLK_BIT 23 -#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 // for all DISP clks +#define FEATURE_LOW_POWER_DCNCLKS_BIT 24 #define FEATURE_DS_SHUBCLK_BIT 25 -#define FEATURE_SPARE0_BIT 26 //SPARE +#define FEATURE_RESERVED0_BIT 26 #define FEATURE_ZSTATES_BIT 27 #define FEATURE_IOMMUL2_PG_BIT 28 #define FEATURE_DS_FCLK_BIT 29 @@ -66,8 +66,8 @@ #define FEATURE_DS_MP1CLK_BIT 31 #define FEATURE_WHISPER_MODE_BIT 32 #define FEATURE_SMU_LOW_POWER_BIT 33 -#define FEATURE_SMART_L3_RINSER_BIT 34 -#define FEATURE_SPARE1_BIT 35 //SPARE +#define FEATURE_RESERVED1_BIT 34 /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */ +#define FEATURE_GFX_DEM_BIT 35 /* v14_0_0 SPARE; v14_0_1 GFX_DEM */ #define FEATURE_PSI_BIT 36 #define FEATURE_PROCHOT_BIT 37 #define FEATURE_CPUOFF_BIT 38 @@ -77,11 +77,11 @@ #define FEATURE_PERF_LIMIT_BIT 42 #define FEATURE_CORE_DLDO_BIT 43 #define FEATURE_DVO_BIT 44 -#define FEATURE_DS_VCN_BIT 45 +#define FEATURE_DS_VCN_BIT 45 /* v14_0_1 this is for both VCN0 and VCN1 */ #define FEATURE_CPPC_BIT 46 #define FEATURE_CPPC_PREFERRED_CORES 47 #define FEATURE_DF_CSTATES_BIT 48 -#define FEATURE_SPARE2_BIT 49 //SPARE +#define FEATURE_FAST_PSTATE_CLDO_BIT 49 /* v14_0_0 SPARE */ #define FEATURE_ATHUB_PG_BIT 50 #define FEATURE_VDDOFF_ECO_BIT 51 #define FEATURE_ZSTATES_ECO_BIT 52 @@ -93,8 +93,8 @@ #define FEATURE_DS_IPUCLK_BIT 58 #define FEATURE_DS_VPECLK_BIT 59 #define FEATURE_VPE_DPM_BIT 60 -#define FEATURE_SPARE_61 61 -#define FEATURE_FP_DIDT 62 +#define FEATURE_SMART_L3_RINSER_BIT 61 /* v14_0_0 SPARE*/ +#define FEATURE_PCC_BIT 62 /* v14_0_0 FP_DIDT v14_0_1 PCC_BIT */ #define NUM_FEATURES 63 // Firmware Header/Footer @@ -151,6 +151,43 @@ typedef struct { // MP1_EXT_SCRATCH7 = RTOS Current Job } FwStatus_t; +typedef struct { + // MP1_EXT_SCRATCH0 + 
uint32_t DpmHandlerID : 8; + uint32_t ActivityMonitorID : 8; + uint32_t DpmTimerID : 8; + uint32_t DpmHubID : 4; + uint32_t DpmHubTask : 4; + // MP1_EXT_SCRATCH1 + uint32_t CclkSyncStatus : 8; + uint32_t ZstateStatus : 4; + uint32_t Cpu1VddOff : 4; + uint32_t DstateFun : 4; + uint32_t DstateDev : 4; + uint32_t GfxOffStatus : 2; + uint32_t Cpu0Off : 2; + uint32_t Cpu1Off : 2; + uint32_t Cpu0VddOff : 2; + // MP1_EXT_SCRATCH2 + uint32_t P2JobHandler :32; + // MP1_EXT_SCRATCH3 + uint32_t PostCode :32; + // MP1_EXT_SCRATCH4 + uint32_t MsgPortBusy :15; + uint32_t RsmuPmiP1Pending : 1; + uint32_t RsmuPmiP2PendingCnt : 8; + uint32_t DfCstateExitPending : 1; + uint32_t Pc6EntryPending : 1; + uint32_t Pc6ExitPending : 1; + uint32_t WarmResetPending : 1; + uint32_t Mp0ClkPending : 1; + uint32_t InWhisperMode : 1; + uint32_t spare2 : 2; + // MP1_EXT_SCRATCH5 + uint32_t IdleMask :32; + // MP1_EXT_SCRATCH6 = RTOS threads' status + // MP1_EXT_SCRATCH7 = RTOS Current Job +} FwStatus_t_v14_0_1; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h index ca7ce4251482..c4dc5881d8df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h @@ -72,23 +72,19 @@ #define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0) - #define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU - -#define PPSMC_MSG_spare_0x17 0x17 -#define PPSMC_MSG_spare_0x18 0x18 +#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency +#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency #define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry #define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry #define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK #define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK - #define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK #define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK #define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0) -#define PPSMC_MSG_spare_0x20 0x20 +#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage #define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0 #define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default - #define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK #define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFM of allowing Zstate entry, i.e. 
no Miracast activity @@ -99,8 +95,8 @@ #define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM #define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK #define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK -#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler -#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler +#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler +#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler #define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis #define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn #define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE @@ -110,7 +106,9 @@ #define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA #define PPSMC_MSG_SetSoftMaxVpe 0x36 ///< #define PPSMC_MSG_SetSoftMinVpe 0x37 ///< -#define PPSMC_Message_Count 0x38 ///< Total number of PPSMC messages +#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache +#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache +#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages /** @}*/ /** diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 3f7463c1c1a9..4af1985ae446 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -27,6 +27,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 #define FEATURE_MASK(feature) (1ULL << feature) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 9e39f99154f9..07a65e005785 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -234,7 +234,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; break; default: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index d6de6d97286c..63399c00cc28 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -161,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu) SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); - SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t), + SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); @@ -171,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu) goto err0_out; smu_table->metrics_time = 0; - smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL); + smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL); if (!smu_table->clocks_table) goto err1_out; @@ -593,6 +593,60 
@@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu) return ret; } +static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t dpm_level, + uint32_t *freq) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + + if (!clk_table || clk_type >= SMU_CLK_COUNT) + return -EINVAL; + + switch (clk_type) { + case SMU_SOCCLK: + if (dpm_level >= clk_table->NumSocClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->SocClocks[dpm_level]; + break; + case SMU_VCLK: + if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->VClocks0[dpm_level]; + break; + case SMU_DCLK: + if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->DClocks0[dpm_level]; + break; + case SMU_VCLK1: + if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->VClocks1[dpm_level]; + break; + case SMU_DCLK1: + if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled) + return -EINVAL; + *freq = clk_table->DClocks1[dpm_level]; + break; + case SMU_UCLK: + case SMU_MCLK: + if (dpm_level >= clk_table->NumMemPstatesEnabled) + return -EINVAL; + *freq = clk_table->MemPstateTable[dpm_level].MemClk; + break; + case SMU_FCLK: + if (dpm_level >= clk_table->NumFclkLevelsEnabled) + return -EINVAL; + *freq = clk_table->FclkClocks_Freq[dpm_level]; + break; + default: + return -EINVAL; + } + + return 0; +} + static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t dpm_level, @@ -637,6 +691,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu, return 0; } +static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t dpm_level, + uint32_t *freq) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq); + + return 0; +} + static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) { @@ -657,6 +724,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, break; case SMU_VCLK: case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: feature_id = SMU_FEATURE_VCN_DPM_BIT; break; default: @@ -666,6 +735,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu, return smu_cmn_feature_is_enabled(smu, feature_id); } +static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + uint32_t clock_limit; + uint32_t max_dpm_level, min_dpm_level; + int ret = 0; + + if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + clock_limit = smu->smu_table.boot_values.uclk; + break; + case SMU_FCLK: + clock_limit = smu->smu_table.boot_values.fclk; + break; + case SMU_GFXCLK: + case SMU_SCLK: + clock_limit = smu->smu_table.boot_values.gfxclk; + break; + case SMU_SOCCLK: + clock_limit = smu->smu_table.boot_values.socclk; + break; + case SMU_VCLK: + case SMU_VCLK1: + clock_limit = smu->smu_table.boot_values.vclk; + break; + case SMU_DCLK: + case SMU_DCLK1: + clock_limit = smu->smu_table.boot_values.dclk; + break; + default: + clock_limit = 0; + break; + } + + /* clock in Mhz unit */ + if (min) + *min = 
clock_limit / 100; + if (max) + *max = clock_limit / 100; + + return 0; + } + + if (max) { + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + *max = clk_table->MaxGfxClk; + break; + case SMU_MCLK: + case SMU_UCLK: + case SMU_FCLK: + max_dpm_level = 0; + break; + case SMU_SOCCLK: + max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1; + break; + case SMU_VCLK: + case SMU_DCLK: + max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1; + break; + case SMU_VCLK1: + case SMU_DCLK1: + max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1; + break; + default: + ret = -EINVAL; + goto failed; + } + + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); + if (ret) + goto failed; + } + } + + if (min) { + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + *min = clk_table->MinGfxClk; + break; + case SMU_MCLK: + case SMU_UCLK: + min_dpm_level = clk_table->NumMemPstatesEnabled - 1; + break; + case SMU_FCLK: + min_dpm_level = clk_table->NumFclkLevelsEnabled - 1; + break; + case SMU_SOCCLK: + min_dpm_level = 0; + break; + case SMU_VCLK: + case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: + min_dpm_level = 0; + break; + default: + ret = -EINVAL; + goto failed; + } + + if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); + if (ret) + goto failed; + } + } + +failed: + return ret; +} + static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, @@ -736,7 +925,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, } if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max); if (ret) goto failed; } @@ -768,7 +957,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu, } if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) { - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min); if (ret) goto failed; } @@ -778,6 +967,19 @@ failed: return ret; } +static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max); + + return 0; +} + static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value) @@ -811,6 +1013,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu, return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value); } +static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *count) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + + switch (clk_type) { + case SMU_SOCCLK: + *count = clk_table->NumSocClkLevelsEnabled; + break; + case SMU_VCLK: + case SMU_DCLK: + *count = clk_table->Vcn0ClkLevelsEnabled; + break; + case SMU_VCLK1: + case SMU_DCLK1: + *count = clk_table->Vcn1ClkLevelsEnabled; + break; + case SMU_MCLK: + *count = clk_table->NumMemPstatesEnabled; + break; + case SMU_FCLK: + *count = 
clk_table->NumFclkLevelsEnabled; + break; + default: + break; + } + + return 0; +} + static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *count) @@ -840,6 +1073,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu, return 0; } +static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *count) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_get_dpm_level_count(smu, clk_type, count); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_get_dpm_level_count(smu, clk_type, count); + + return 0; +} + static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { @@ -866,18 +1111,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, case SMU_SOCCLK: case SMU_VCLK: case SMU_DCLK: + case SMU_VCLK1: + case SMU_DCLK1: case SMU_MCLK: case SMU_FCLK: ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value); if (ret) break; - ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count); + ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count); if (ret) break; for (i = 0; i < count; i++) { - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value); if (ret) break; @@ -940,8 +1187,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, break; case SMU_VCLK: case SMU_DCLK: - msg_set_min = SMU_MSG_SetHardMinVcn; - msg_set_max = SMU_MSG_SetSoftMaxVcn; + msg_set_min = SMU_MSG_SetHardMinVcn0; + msg_set_max = SMU_MSG_SetSoftMaxVcn0; + break; + case SMU_VCLK1: + case SMU_DCLK1: + msg_set_min = SMU_MSG_SetHardMinVcn1; + msg_set_max = SMU_MSG_SetSoftMaxVcn1; break; default: return -EINVAL; @@ -971,11 +1223,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, case SMU_FCLK: case SMU_VCLK: case SMU_DCLK: - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq); if (ret) break; - ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); + ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq); if (ret) break; @@ -1000,25 +1252,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max); sclk_min = sclk_max; fclk_min = fclk_max; socclk_min = socclk_max; break; case AMD_DPM_FORCED_LEVEL_LOW: - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL); sclk_max = sclk_min; fclk_max 
= fclk_min; socclk_max = socclk_min; break; case AMD_DPM_FORCED_LEVEL_AUTO: - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); - smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max); + smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: @@ -1067,6 +1319,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu, return ret; } +static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + + smu->gfx_default_hard_min_freq = clk_table->MinGfxClk; + smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk; + smu->gfx_actual_hard_min_freq = 0; + smu->gfx_actual_soft_max_freq = 0; + + return 0; +} + static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) { DpmClocks_t *clk_table = smu->smu_table.clocks_table; @@ -1079,6 +1343,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm return 0; } +static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu); + + return 0; +} + static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, bool enable) { @@ -1095,6 +1369,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, 0, NULL); } +static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) +{ + DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table; + uint8_t idx; + + /* Only the Clock information of SOC and VPE is copied to provide VPE DPM settings for use. */ + for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) { + clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx]:0; + clock_table->SocClocks[idx].Vol = 0; + } + + for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) { + clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? 
clk_table->VPEClocks[idx]:0; + clock_table->VPEClocks[idx].Vol = 0; + } + + return 0; +} + static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) { DpmClocks_t *clk_table = smu->smu_table.clocks_table; @@ -1114,6 +1407,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks * return 0; } +static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table) +{ + if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + smu_14_0_0_get_dpm_table(smu, clock_table); + else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) + smu_14_0_1_get_dpm_table(smu, clock_table); + + return 0; +} + static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .check_fw_status = smu_v14_0_check_fw_status, .check_fw_version = smu_v14_0_check_fw_version, @@ -1135,16 +1438,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_driver_table_location = smu_v14_0_set_driver_table_location, .gfx_off_control = smu_v14_0_gfx_off_control, .mode2_reset = smu_v14_0_0_mode2_reset, - .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq, .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, .set_performance_level = smu_v14_0_0_set_performance_level, - .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters, + .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, - .get_dpm_clock_table = smu_14_0_0_get_dpm_table, + .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, }; static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu) -- cgit v1.2.3-70-g09d2 From 533eefb9be76c3b23d220ee18edfda8eb56cefff Mon Sep 17 00:00:00 2001 From: Yifan Zhang Date: Tue, 12 Dec 2023 17:17:05 +0800 Subject: drm/amdgpu: add smu 14.0.1 discovery support This patch to add smu 14.0.1 support Reviewed-by: Alex Deucher Signed-off-by: Yifan Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index fdd36fb027ab..ac5bf01fe8d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1896,6 +1896,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; case IP_VERSION(14, 0, 0): + case IP_VERSION(14, 0, 1): amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); break; default: -- cgit v1.2.3-70-g09d2 From f886b49feaae30acd599e37d4284836024b0f3ed Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Thu, 28 Mar 2024 18:22:10 +0800 Subject: drm/amdgpu: implement IRQ_STATE_ENABLE for SDMA v4.4.2 SDMA_CNTL is not set in some cases, driver configures it by itself. 
v2: simplify code Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index 34237a1b1f2e..82eab49be82b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -1602,19 +1602,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev, u32 sdma_cntl; sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL); - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, - DRAM_ECC_INT_ENABLE, 0); - WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); - break; - /* sdma ecc interrupt is enabled by default - * driver doesn't need to do anything to - * enable the interrupt */ - case AMDGPU_IRQ_STATE_ENABLE: - default: - break; - } + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE, + state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0); + WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl); return 0; } -- cgit v1.2.3-70-g09d2 From ecedd99a9369fb5cde601ae9abd58bca2739f1ae Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Fri, 15 Mar 2024 21:25:25 -0600 Subject: drm/amd/display: Skip on writeback when it's not applicable [WHY] dynamic memory safety error detector (KASAN) catches and generates error messages "BUG: KASAN: slab-out-of-bounds" as writeback connector does not support certain features which are not initialized. [HOW] Skip them when connector type is DRM_MODE_CONNECTOR_WRITEBACK. Link: https://gitlab.freedesktop.org/drm/amd/-/issues/3199 Reviewed-by: Harry Wentland Reviewed-by: Rodrigo Siqueira Acked-by: Roman Li Signed-off-by: Alex Hung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 173438fc7537..6cf0d5f276fe 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3047,6 +3047,10 @@ static int dm_resume(void *handle) /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); drm_for_each_connector_iter(connector, &iter) { + + if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + continue; + aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_root) @@ -5926,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, &aconnector->base.probed_modes : &aconnector->base.modes; + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK) + return NULL; + if (aconnector->freesync_vid_base.clock != 0) return &aconnector->freesync_vid_base; @@ -8767,10 +8774,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev, if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) continue; +notify: if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) continue; -notify: aconnector = to_amdgpu_dm_connector(connector); mutex_lock(&adev->dm.audio_lock); -- cgit v1.2.3-70-g09d2 From 3818708e9c9712e2ba4006bc23502ee7b031bd3f Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Thu, 28 Mar 2024 11:00:50 +0800 Subject: drm/amd/pm: fix the high voltage issue after unload fix the high voltage 
issue after unload on smu 13.0.10 Signed-off-by: Kenneth Feng Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 26 ++++++++++++--------- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 27 ++++++++++++++++++++-- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 + .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 8 ++++++- 4 files changed, 48 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index aa16d51dd842..7753a2e64d41 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4135,18 +4135,22 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ip_blocks[i].status.hw = true; } } + } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(adev)) { + r = psp_gpu_reset(adev); } else { - tmp = amdgpu_reset_method; - /* It should do a default reset when loading or reloading the driver, - * regardless of the module parameter reset_method. - */ - amdgpu_reset_method = AMD_RESET_METHOD_NONE; - r = amdgpu_asic_reset(adev); - amdgpu_reset_method = tmp; - if (r) { - dev_err(adev->dev, "asic reset on init failed\n"); - goto failed; - } + tmp = amdgpu_reset_method; + /* It should do a default reset when loading or reloading the driver, + * regardless of the module parameter reset_method. + */ + amdgpu_reset_method = AMD_RESET_METHOD_NONE; + r = amdgpu_asic_reset(adev); + amdgpu_reset_method = tmp; + } + + if (r) { + dev_err(adev->dev, "asic reset on init failed\n"); + goto failed; } } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 246b211b1e85..65333141b1c1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -735,7 +735,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - smu->smu_baco.state = SMU_BACO_STATE_EXIT; + smu->smu_baco.state = SMU_BACO_STATE_NONE; smu->smu_baco.platform_support = false; smu->user_dpm_profile.fan_mode = -1; @@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) return 0; } +static int smu_reset_mp1_state(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if ((!adev->in_runpm) && (!adev->in_suspend) && + (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) == + IP_VERSION(13, 0, 10) && + !amdgpu_device_has_display_hardware(adev)) + ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD); + + return ret; +} + static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = adev->powerplay.pp_handle; + int ret; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; @@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle) adev->pm.dpm_enabled = false; - return smu_smc_hw_cleanup(smu); + ret = smu_smc_hw_cleanup(smu); + if (ret) + return ret; + + ret = smu_reset_mp1_state(smu); + if (ret) + return ret; + + return 0; } static void smu_late_fini(void *handle) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index a870bdd49a4e..1fa81575788c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -424,6 +424,7 @@ enum smu_reset_mode { enum smu_baco_state { 
SMU_BACO_STATE_ENTER = 0, SMU_BACO_STATE_EXIT, + SMU_BACO_STATE_NONE, }; struct smu_baco_context { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 9c03296f92cd..67117ced7c6a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, switch (mp1_state) { case PP_MP1_STATE_UNLOAD: - ret = smu_cmn_set_mp1_state(smu, mp1_state); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_PrepareMp1ForUnload, + 0x55, NULL); + + if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT) + ret = smu_v13_0_disable_pmfw_state(smu); + break; default: /* Ignore others */ -- cgit v1.2.3-70-g09d2 From f7e232de51bb1b45646e5b7dc4ebcf13510f2630 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Wed, 6 Mar 2024 17:05:07 +0530 Subject: drm/amdgpu: Fix VCN allocation in CPX partition VCN need not always be shared in CPX mode for all GFX 9.4.3 SOC SKUs. In certain configs, a VCN instance can be exclusively allocated to a partition even under CPX mode. Signed-off-by: Lijo Lazar Reviewed-by: James Zhu Reviewed-by: Asad Kamal Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index d6f808acfb17..fbb43ae7624f 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } +static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev) +{ + return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst); +} + static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, uint32_t inst_idx, struct amdgpu_ring *ring) { @@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, case AMDGPU_RING_TYPE_VCN_ENC: case AMDGPU_RING_TYPE_VCN_JPEG: ip_blk = AMDGPU_XCP_VCN; - if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + if (aqua_vanjaram_xcp_vcn_shared(adev)) inst_mask = 1 << (inst_idx * 2); break; default: @@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update( aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); - /* VCN is shared by two partitions under CPX MODE */ + /* VCN may be shared by two partitions under CPX MODE in certain + * configs. + */ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || - ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && - adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE) + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + aqua_vanjaram_xcp_vcn_shared(adev)) aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); } -- cgit v1.2.3-70-g09d2 From e33997e18d0fddd217a0fce988abbfd015338631 Mon Sep 17 00:00:00 2001 From: ZhenGuo Yin Date: Tue, 2 Apr 2024 11:41:05 +0800 Subject: drm/amdgpu: clear set_q_mode_offs when VM changed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] set_q_mode_offs does not get cleared after a GPU reset, so the next SET_Q_MODE packet that initializes shadow memory is skipped, which leads to a page fault. [How] A VM flush is needed after GPU reset; clear set_q_mode_offs when emitting the VM flush.
Fixes: 8bc75586ea01 ("drm/amdgpu: workaround to avoid SET_Q_MODE packets v2") Reviewed-by: Christian König Signed-off-by: ZhenGuo Yin Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 1770e496c1b7..2c82e2741619 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring, /* Make sure that we can't skip the SET_Q_MODE packets when the VM * changed in any way. */ + ring->set_q_mode_offs = 0; ring->set_q_mode_ptr = NULL; } -- cgit v1.2.3-70-g09d2 From d06af584be5a769d124b7302b32a033e9559761d Mon Sep 17 00:00:00 2001 From: Zhigang Luo Date: Mon, 18 Mar 2024 14:13:10 -0400 Subject: amd/amdkfd: sync all devices to wait all processes being evicted If more than one device is doing a reset in parallel, the first device calls kfd_suspend_all_processes() to evict all processes on all devices, and this call takes time to finish. The other devices start reset and recovery without waiting. If a process has not been evicted before recovery starts, it gets restored, which then causes a page fault. Signed-off-by: Zhigang Luo Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 041ec3de55e7..719d6d365e15 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { struct kfd_node *node; int i; - int count; if (!kfd->init_complete) return; @@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) /* for runtime suspend, skip locking kfd */ if (!run_pm) { mutex_lock(&kfd_processes_mutex); - count = ++kfd_locked; - mutex_unlock(&kfd_processes_mutex); - /* For first KFD device suspend all the KFD processes */ - if (count == 1) + if (++kfd_locked == 1) kfd_suspend_all_processes(); + mutex_unlock(&kfd_processes_mutex); } for (i = 0; i < kfd->num_nodes; i++) { @@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { - int ret, count, i; + int ret, i; if (!kfd->init_complete) return 0; @@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) /* for runtime resume, skip unlocking kfd */ if (!run_pm) { mutex_lock(&kfd_processes_mutex); - count = --kfd_locked; - mutex_unlock(&kfd_processes_mutex); - - WARN_ONCE(count < 0, "KFD suspend / resume ref. error"); - if (count == 0) + if (--kfd_locked == 0) ret = kfd_resume_all_processes(); + WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); + mutex_unlock(&kfd_processes_mutex); } return ret; -- cgit v1.2.3-70-g09d2 From 2cc69a10d83180f3de9f5afe3a98e972b1453d4c Mon Sep 17 00:00:00 2001 From: Alex Hung Date: Sat, 23 Mar 2024 12:02:54 -0600 Subject: drm/amd/display: Return max resolution supported by DWB mode_config's max width x height is 4096x2160, which is higher than DWB's max resolution of 3840x2160, so the latter is returned instead.
Cc: stable@vger.kernel.org Reviewed-by: Harry Wentland Acked-by: Hamza Mahfooz Signed-off-by: Alex Hung Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c index 16e72d623630..08c494a7a21b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c @@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder, static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - - return drm_add_modes_noedid(connector, dev->mode_config.max_width, - dev->mode_config.max_height); + /* Maximum resolution supported by DWB */ + return drm_add_modes_noedid(connector, 3840, 2160); } static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector, -- cgit v1.2.3-70-g09d2 From bbca7f414ae9a12ea231cdbafd79c607e3337ea8 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Wed, 3 Apr 2024 17:28:44 +0800 Subject: drm/amdgpu: fix incorrect number of active RBs for gfx11 The RB bitmap should be global active RB bitmap & active RB bitmap based on active SA. Signed-off-by: Tim Huang Reviewed-by: Yifan Zhang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 2c82e2741619..f7325b02a191 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev) active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa)); } - active_rb_bitmap |= global_active_rb_bitmap; + active_rb_bitmap &= global_active_rb_bitmap; adev->gfx.config.backend_enable_mask = active_rb_bitmap; adev->gfx.config.num_rbs = hweight32(active_rb_bitmap); } -- cgit v1.2.3-70-g09d2 From 81901d8d0472e9a19d294ae1dea76b950548195d Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Fri, 22 Mar 2024 15:02:45 -0400 Subject: drm/amd/display: always reset ODM mode in context when adding first plane [why] In current implemenation ODM mode is only reset when the last plane is removed from dc state. For any dc validate we will always remove all current planes and add new planes. However when switching from no planes to 1 plane, ODM mode is not reset because no planes get removed. This has caused an issue where we kept ODM combine when it should have been remove when a plane is added. The change is to reset ODM mode when adding the first plane. 
Cc: stable@vger.kernel.org Reviewed-by: Alvin Lee Acked-by: Hamza Mahfooz Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 5cc7f8da209c..61986e5cb491 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -436,6 +436,15 @@ bool dc_state_add_plane( goto out; } + if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm) + /* ODM combine could prevent us from supporting more planes + * we will reset ODM slice count back to 1 when all planes have + * been removed to maximize the amount of planes supported when + * new planes are added. + */ + resource_update_pipes_for_stream_with_slice_count( + state, dc->current_state, dc->res_pool, stream, 1); + otg_master_pipe = resource_get_otg_master_for_stream( &state->res_ctx, stream); if (otg_master_pipe) -- cgit v1.2.3-70-g09d2 From 953927587f37b731abdeabe46ad44a3b3ec67a52 Mon Sep 17 00:00:00 2001 From: Dillon Varone Date: Thu, 21 Mar 2024 13:49:43 -0400 Subject: drm/amd/display: Do not recursively call manual trigger programming [WHY&HOW] We should not be recursively calling the manual trigger programming function when FAMS is not in use. Cc: stable@vger.kernel.org Reviewed-by: Alvin Lee Acked-by: Hamza Mahfooz Signed-off-by: Dillon Varone Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index f07a4c7e48bc..52eab8fccb7f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc) OTG_V_TOTAL_MAX_SEL, 1, OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); } } -- cgit v1.2.3-70-g09d2 From cf79814cb0bf5749b9f0db53ca231aa540c02768 Mon Sep 17 00:00:00 2001 From: Fudongwang Date: Tue, 26 Mar 2024 16:03:16 +0800 Subject: drm/amd/display: fix disable otg wa logic in DCN316 [Why] Wrong logic cause screen corruption. [How] Port logic from DCN35/314. 
Cc: stable@vger.kernel.org Reviewed-by: Nicholas Kazlauskas Acked-by: Hamza Mahfooz Signed-off-by: Fudongwang Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 12f3e8aa46d8..6ad4f4efec5d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa( return display_count; } -static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable) +static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, + bool safe_to_lower, bool disable) { struct dc *dc = clk_mgr_base->ctx->dc; int i; for (i = 0; i < dc->res_pool->pipe_count; ++i) { - struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *pipe = safe_to_lower + ? &context->res_ctx.pipe_ctx[i] + : &dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->top_pipe || pipe->prev_odm_pipe) continue; - if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL || - dc_is_virtual_signal(pipe->stream->signal))) { + if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) || + !pipe->stream->link_enc)) { if (disable) { - pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc) + pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg); + reset_sync_context_for_pipe(dc, context, i); } else pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg); @@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - dcn316_disable_otg_wa(clk_mgr_base, context, true); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - dcn316_disable_otg_wa(clk_mgr_base, context, false); + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; } -- cgit v1.2.3-70-g09d2 From 9e61ef8d219877202d4ee51d0d2ad9072c99a262 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 12 Mar 2024 11:55:52 -0400 Subject: drm/amd/display: Program VSC SDP colorimetry for all DP sinks >= 1.4 In order for display colorimetry to work correctly on DP displays we need to send the VSC SDP packet. We should only do so for panels with DPCD revision greater or equal to 1.4 as older receivers might have problems with it. 
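As an illustrative aside (not taken from the patch itself): the "1.4" cutoff is evaluated against the raw DPCD_REV byte at DPCD address 0x00000, which packs the major revision in the high nibble and the minor revision in the low nibble, so a DP 1.2 sink reports 0x12 and a DP 1.4 sink reports 0x14. The stand-alone sketch below is for illustration only; the helper name is invented, and it simply mirrors the dpcd_caps.dpcd_rev.raw >= 0x14 comparison used in the amdgpu_dm.c change below.

#include <stdint.h>
#include <stdio.h>

/* Decode a raw DPCD_REV byte and decide whether VSC SDP colorimetry
 * should be used, mirroring the >= 0x14 (DP 1.4) cutoff from the patch. */
static int use_vsc_sdp_colorimetry(uint8_t dpcd_rev_raw, int vsc_sdp_supported)
{
	unsigned int major = dpcd_rev_raw >> 4;  /* e.g. 1 for 0x14 */
	unsigned int minor = dpcd_rev_raw & 0xf; /* e.g. 4 for 0x14 */

	printf("sink reports DPCD revision %u.%u\n", major, minor);
	return dpcd_rev_raw >= 0x14 && vsc_sdp_supported;
}

int main(void)
{
	printf("DP 1.2 sink: %d\n", use_vsc_sdp_colorimetry(0x12, 1)); /* prints 0 */
	printf("DP 1.4 sink: %d\n", use_vsc_sdp_colorimetry(0x14, 1)); /* prints 1 */
	return 0;
}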
Cc: stable@vger.kernel.org Cc: Joshua Ashton Cc: Xaver Hugl Cc: Melissa Wen Cc: Agustin Gutierrez Reviewed-by: Agustin Gutierrez Acked-by: Hamza Mahfooz Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6cf0d5f276fe..d112ce6c812c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6318,7 +6318,9 @@ create_stream_for_sink(struct drm_connector *connector, if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket); - if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST || + stream->signal == SIGNAL_TYPE_EDP) { // // should decide stream support vsc sdp colorimetry capability // before building vsc info packet @@ -6328,7 +6330,8 @@ create_stream_for_sink(struct drm_connector *connector, stream->use_vsc_sdp_for_colorimetry = aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; } else { - if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) + if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) stream->use_vsc_sdp_for_colorimetry = true; } if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) -- cgit v1.2.3-70-g09d2 From c3e2a5f2da904a18661335e8be2b961738574998 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 21 Mar 2024 11:13:38 -0400 Subject: drm/amd/display: Set VSC SDP Colorimetry same way for MST and SST The previous check for the is_vsc_sdp_colorimetry_supported flag for MST sink signals did nothing. Simplify the code and use the same check for MST and SST. 
Cc: stable@vger.kernel.org Reviewed-by: Agustin Gutierrez Acked-by: Hamza Mahfooz Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d112ce6c812c..6d2f60c61dec 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6325,15 +6325,9 @@ create_stream_for_sink(struct drm_connector *connector, // should decide stream support vsc sdp colorimetry capability // before building vsc info packet // - stream->use_vsc_sdp_for_colorimetry = false; - if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - stream->use_vsc_sdp_for_colorimetry = - aconnector->dc_sink->is_vsc_sdp_colorimetry_supported; - } else { - if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && - stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) - stream->use_vsc_sdp_for_colorimetry = true; - } + stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 && + stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED; + if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) tf = TRANSFER_FUNC_GAMMA_22; mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf); -- cgit v1.2.3-70-g09d2 From e047dd448d2bc12b8c30d7e3e6e98cea1fc28a17 Mon Sep 17 00:00:00 2001 From: Zhongwei Date: Wed, 27 Mar 2024 13:49:40 +0800 Subject: drm/amd/display: Adjust dprefclk by down spread percentage. [Why] OLED panels show no display for large vtotal timings. [How] Check if ss is enabled and read from lut for spread spectrum percentage. Adjust dprefclk as required. DP_DTO adjustment is for edp only. 
Cc: stable@vger.kernel.org Reviewed-by: Nicholas Kazlauskas Acked-by: Hamza Mahfooz Signed-off-by: Zhongwei Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 50 ++++++++++++++++++++++ .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 8 ++-- 2 files changed, 54 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c index 101fe96287cb..d9c5692c86c2 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c @@ -73,6 +73,12 @@ #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L +#define regCLK5_0_CLK5_spll_field_8 0x464b +#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0 + +#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd +#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L + #define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0 #define REG(reg_name) \ @@ -411,6 +417,17 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs { } +static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t ssc_enable; + + REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable); + + return ssc_enable == 1; +} + static void init_clk_states(struct clk_mgr *clk_mgr) { struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); @@ -428,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr) void dcn35_init_clocks(struct clk_mgr *clk_mgr) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); init_clk_states(clk_mgr); + + // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk + if (dcn35_is_spll_ssc_enabled(clk_mgr)) + clk_mgr->dp_dto_source_clock_in_khz = + dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz); + else + clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz; + } static struct clk_bw_params dcn35_bw_params = { .vram_type = Ddr4MemType, @@ -517,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks; static struct dcn35_watermarks dummy_wms = { 0 }; +static struct dcn35_ss_info_table ss_info_table = { + .ss_divider = 1000, + .ss_percentage = {0, 0, 375, 375, 375} +}; + +static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr) +{ + struct dc_context *ctx = clk_mgr->base.ctx; + uint32_t clock_source; + + REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source); + // If it's DFS mode, clock_source is 0. 
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) { + clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source]; + + if (clk_mgr->dprefclk_ss_percentage != 0) { + clk_mgr->ss_on_dprefclk = true; + clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider; + } + } +} + static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table) { int i, num_valid_sets; @@ -1061,6 +1109,8 @@ void dcn35_clk_mgr_construct( dce_clock_read_ss_info(&clk_mgr->base); /*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/ + dcn35_read_ss_info_from_lut(&clk_mgr->base); + clk_mgr->base.base.bw_params = &dcn35_bw_params; if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 970644b695cd..b5e0289d2fe8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk( struct bp_pixel_clock_parameters bp_pc_params = {0}; enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24; - if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0) + // Apply ssed(spread spectrum) dpref clock for edp only. + if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0 + && pix_clk_params->signal_type == SIGNAL_TYPE_EDP + && encoding == DP_8b_10b_ENCODING) dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz; // For these signal types Driver to program DP_DTO without calling VBIOS Command table if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) { @@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz( unsigned int modulo_hz = 0; unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz; - if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0) - dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz; - if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) { clock_hz = REG_READ(PHASE[inst]); -- cgit v1.2.3-70-g09d2 From 6dba20d23e85034901ccb765a7ca71199bcca4df Mon Sep 17 00:00:00 2001 From: Yifan Zhang Date: Sun, 7 Apr 2024 22:01:35 +0800 Subject: drm/amdgpu: differentiate external rev id for gfx 11.5.0 This patch to differentiate external rev id for gfx 11.5.0. 
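As an illustrative aside (not taken from the patch itself): the practical effect of the change is easiest to see as the mapping from silicon rev_id to external_rev_id. The stand-alone sketch below is for illustration only and simply mirrors the logic added to soc21.c further down.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the GFX 11.5.0 branch in the soc21.c change: rev_id 0 keeps
 * external id 0x1, any later rev_id is offset by 0x10 instead of 0x1. */
static uint32_t gfx_11_5_0_external_rev_id(uint32_t rev_id)
{
	return (rev_id == 0) ? 0x1 : rev_id + 0x10;
}

int main(void)
{
	for (uint32_t rev_id = 0; rev_id <= 2; rev_id++)
		printf("rev_id %u -> external_rev_id 0x%x\n",
		       rev_id, gfx_11_5_0_external_rev_id(rev_id));
	return 0; /* prints 0x1, 0x11, 0x12 */
}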
Signed-off-by: Yifan Zhang Reviewed-by: Tim Huang Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/soc21.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index abe319b0f063..43ca63fe85ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -720,7 +720,10 @@ static int soc21_common_early_init(void *handle) AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_JPEG | AMD_PG_SUPPORT_GFX_PG; - adev->external_rev_id = adev->rev_id + 0x1; + if (adev->rev_id == 0) + adev->external_rev_id = 0x1; + else + adev->external_rev_id = adev->rev_id + 0x10; break; case IP_VERSION(11, 5, 1): adev->cg_flags = -- cgit v1.2.3-70-g09d2 From 3b0daecfeac0103aba8b293df07a0cbaf8b43f29 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Fri, 12 Apr 2024 06:11:25 +1000 Subject: amdkfd: use calloc instead of kzalloc to avoid integer overflow This uses calloc instead of doing the multiplication which might overflow. Cc: stable@vger.kernel.org Signed-off-by: Dave Airlie --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index f9631f4b1a02..55aa74cbc532 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -779,8 +779,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp, * nodes, but not more than args->num_of_nodes as that is * the amount of memory allocated by user */ - pa = kzalloc((sizeof(struct kfd_process_device_apertures) * - args->num_of_nodes), GFP_KERNEL); + pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures), + GFP_KERNEL); if (!pa) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From 6b0d78032f98d494367a9293d584be2ac1173fb4 Mon Sep 17 00:00:00 2001 From: Luqmaan Irshad Date: Tue, 2 Apr 2024 17:33:46 -0400 Subject: drm/amd/amdgpu: Update PF2VF Header Adding a new field for GPU Capacity to align the header with the host. Signed-off-by: Luqmaan Irshad Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 0de78d6a83fe..fb2b394bb9c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -158,7 +158,7 @@ struct amd_sriov_msg_pf2vf_info_header { uint32_t reserved[2]; }; -#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (48) +#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (49) struct amd_sriov_msg_pf2vf_info { /* header contains size and version */ struct amd_sriov_msg_pf2vf_info_header header; @@ -209,6 +209,8 @@ struct amd_sriov_msg_pf2vf_info { struct amd_sriov_msg_uuid_info uuid_info; /* PCIE atomic ops support flag */ uint32_t pcie_atomic_ops_support_flags; + /* Portion of GPU memory occupied by VF. 
MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */ + uint32_t gpu_capacity; /* reserved */ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE]; }; -- cgit v1.2.3-70-g09d2 From a0e002cdac429d7b2ae8b66baf51263193a935c8 Mon Sep 17 00:00:00 2001 From: Jack Xiao Date: Tue, 9 Apr 2024 17:31:01 +0800 Subject: drm/amdgpu/sdma6: set sdma hang watchdog Set SDMAx_WATCHDOG_CNTL.QUEUE_HANG_COUNT registers to improve SDMA reliability. Signed-off-by: Jack Xiao Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 361835a61f2e..67a4d8b1512b 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -507,6 +507,13 @@ static int sdma_v6_0_gfx_resume(struct amdgpu_device *adev) /* set minor_ptr_update to 0 after wptr programed */ WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0); + /* Set up sdma hang watchdog */ + temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL)); + /* 100ms per unit */ + temp = REG_SET_FIELD(temp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT, + max(adev->usec_timeout/100000, 1)); + WREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), temp); + /* Set up RESP_MODE to non-copy addresses */ temp = RREG32_SOC15_IP(GC, sdma_v6_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL)); temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3); -- cgit v1.2.3-70-g09d2 From c8962679af3538deaf6d90e90bbdceb0f66b6e98 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 15 Mar 2024 13:07:53 +0100 Subject: drm/amdgpu: remove invalid resource->start check v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The majority of those where removed in the commit aed01a68047b ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2") But this one was missed because it's working on the resource and not the BO. Since we also no longer use a fake start address for visible BOs this will now trigger invalid mapping errors. 
v2: also remove the unused variable Signed-off-by: Christian König Fixes: aed01a68047b ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2") CC: stable@vger.kernel.org Acked-by: Pierre-Eric Pelloux-Prayer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fc418e670fda..6417cb76ccd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -557,7 +557,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - size_t bus_size = (size_t)mem->size; switch (mem->mem_type) { case TTM_PL_SYSTEM: @@ -568,9 +567,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; - /* check if it's visible */ - if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) - return -EINVAL; if (adev->mman.aper_base_kaddr && mem->placement & TTM_PL_FLAG_CONTIGUOUS) -- cgit v1.2.3-70-g09d2 From a41aa6a7d0a6687fe4eff9822cb799082cc0975c Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 08:38:55 -0600 Subject: drm/amd/display: Add comments to improve the code readability This commit just introduce some basic comments that helps to understand the overall behavior of some structs. Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2 ++ drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c | 1 - drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 3fab7c5bf093..028b2f971e36 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -339,7 +339,9 @@ struct stream_resource { }; struct plane_resource { + /* scl_data is scratch space required to program a plane */ struct scaler_data scl_data; + /* Below pointers to hw objects are required to enable the plane */ struct hubp *hubp; struct mem_input *mi; struct input_pixel_processor *ipp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index 35a2cce0c2b8..56ee45e12b46 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -56,7 +56,6 @@ #include "dce/dce_aux.h" #include "dce/dce_abm.h" #include "dce/dce_i2c.h" -/* TODO remove this include */ #ifndef mmMC_HUB_RDREQ_DMIF_LIMIT #include "gmc/gmc_7_1_d.h" diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 914b234d7f6b..0d2974c9c823 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -182,6 +182,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn201_soc = { .socclk_mhz = 1254.0, .dram_speed_mts = 14000.0, }, + /* state4 is not an actual state, just defines unsupported for dml*/ { .state = 4, .dscclk_mhz = 400.0, -- cgit v1.2.3-70-g09d2 From 
2c84f4c1df056366fee0cd0071c0a1215af5ff24 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 09:03:50 -0600 Subject: drm/amd/display: Adjust some includes used by display Some of the includes used in the DC can be removed and others need to be update. This commit adjusts some of those headers in the display code. Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c | 1 - drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c | 1 - drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c | 1 - drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 1 - drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c | 2 +- drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c | 1 - drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 1 - 7 files changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 2a74e2d74909..369421e46c52 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -23,7 +23,6 @@ * */ -#include "reg_helper.h" #include "core_types.h" #include "clk_mgr_internal.h" #include "rv1_clk_mgr.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c index 89b79dd39628..19897fa52e7e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c @@ -26,7 +26,6 @@ #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" -#include #include "rv1_clk_mgr_vbios_smu.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c index 64c2b88dfc9f..3253115a153d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c @@ -23,7 +23,6 @@ * */ -#include #include "dcn30_clk_mgr_smu_msg.h" #include "clk_mgr_internal.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 6904e95113c1..f201628e4e98 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -23,7 +23,6 @@ * */ -#include #include "core_types.h" #include "clk_mgr_internal.h" #include "reg_helper.h" diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c index 879f1494c4cd..2d14346b680e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c @@ -29,6 +29,7 @@ #include "dm_helpers.h" #include "dcn315_smu.h" #include "mp/mp_13_0_5_offset.h" +#include "logger_types.h" #define MAX_INSTANCE 6 #define MAX_SEGMENT 6 @@ -69,7 +70,6 @@ static const struct IP_BASE NBIO_BASE = { { { { 0x00000000, 0x00000014, 0x00000D #define REG_NBIO(reg_name) \ (NBIO_BASE.instance[0].segment[regBIF_BX_PF2_ ## reg_name ## _BASE_IDX] + regBIF_BX_PF2_ ## reg_name) -#include "logger_types.h" #undef DC_LOGGER #define DC_LOGGER \ CTX->logger diff --git 
a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c index fbcd8fb58ea8..c8c55f196f8d 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c @@ -24,7 +24,6 @@ */ #include "link_dp_trace.h" #include "link/protocols/link_dpcd.h" -#include "link.h" void dp_trace_init(struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 0d2974c9c823..bc65f82b111d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -55,7 +55,6 @@ #include "dce110/dce110_resource.h" #include "dce/dce_aux.h" #include "dce/dce_i2c.h" -#include "dcn201/dcn201_hubbub.h" #include "dcn10/dcn10_resource.h" #include "cyan_skillfish_ip_offset.h" -- cgit v1.2.3-70-g09d2 From 6d4279cb99ac4f51d10409501d29969f687ac8dc Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 10:42:05 -0600 Subject: drm/amd/display: Drop legacy code This commit removes code that are not used by display anymore. Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4 --- drivers/gpu/drm/amd/display/dc/inc/resource.h | 7 ----- .../gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c | 10 ------- .../amd/display/dc/resource/dcn21/dcn21_resource.c | 33 +--------------------- 4 files changed, 1 insertion(+), 53 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index a15efadb9183..75b9ec21f297 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -178,10 +178,6 @@ struct stream_encoder_funcs { void (*stop_dp_info_packets)( struct stream_encoder *enc); - void (*reset_fifo)( - struct stream_encoder *enc - ); - void (*dp_blank)( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 7cb0ed612716..361ad6b16b96 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -584,13 +584,6 @@ bool get_temp_dp_link_res(struct dc_link *link, struct link_resource *link_res, struct dc_link_settings *link_settings); -#if defined(CONFIG_DRM_AMD_DC_FP) -struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( - const struct resource_context *res_ctx, - const struct resource_pool *pool, - const struct dc_link *link); -#endif - void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c index 58bdbd859bf9..d6f095b4555d 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c @@ -462,16 +462,6 @@ void optc2_setup_manual_trigger(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); - /* Set the min/max selectors unconditionally so that - * DMCUB fw may change OTG timings when necessary - * TODO: Remove the w/a after fixing the issue in DMCUB firmware - */ - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, - 
OTG_V_TOTAL_MIN_SEL, 1, - OTG_V_TOTAL_MAX_SEL, 1, - OTG_FORCE_LOCK_ON_EVENT, 0, - OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - REG_SET_8(OTG_TRIGA_CNTL, 0, OTG_TRIGA_SOURCE_SELECT, 21, OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 65d337731f56..8663cbc3d1cf 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -581,32 +581,6 @@ static const struct resource_caps res_cap_rn = { .num_dsc = 3, }; -#ifdef DIAGS_BUILD -static const struct resource_caps res_cap_rn_FPGA_4pipe = { - .num_timing_generator = 4, - .num_opp = 4, - .num_video_plane = 4, - .num_audio = 7, - .num_stream_encoder = 4, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 0, -}; - -static const struct resource_caps res_cap_rn_FPGA_2pipe_dsc = { - .num_timing_generator = 2, - .num_opp = 2, - .num_video_plane = 2, - .num_audio = 7, - .num_stream_encoder = 2, - .num_pll = 4, - .num_dwb = 1, - .num_ddc = 4, - .num_dsc = 2, -}; -#endif - static const struct dc_plane_cap plane_cap = { .type = DC_PLANE_TYPE_DCN_UNIVERSAL, .per_pixel_alpha = true, @@ -1415,16 +1389,11 @@ static bool dcn21_resource_construct( struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; uint32_t pipe_fuses = read_pipe_fuses(ctx); - uint32_t num_pipes; + uint32_t num_pipes = 0; ctx->dc_bios->regs = &bios_regs; pool->base.res_cap = &res_cap_rn; -#ifdef DIAGS_BUILD - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) - //pool->base.res_cap = &res_cap_nv10_FPGA_2pipe_dsc; - pool->base.res_cap = &res_cap_rn_FPGA_4pipe; -#endif pool->base.funcs = &dcn21_res_pool_funcs; -- cgit v1.2.3-70-g09d2 From a842b7fe7f807c928ad8eee794b4682d479c2028 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 11:11:54 -0600 Subject: drm/amd/display: Update resource capabilities and debug struct for DCN201 Some of the resource capabilities for DCN201 and the debug default option are outdated. This commit just set some of the missing configurations for DCN201. Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index bc65f82b111d..070a4efb308b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -566,6 +566,8 @@ static const struct resource_caps res_cap_dnc201 = { .num_audio = 2, .num_stream_encoder = 2, .num_pll = 2, + .num_dwb = 0, + .num_dsc = 0, .num_ddc = 2, }; @@ -612,7 +614,7 @@ static const struct dc_debug_options debug_defaults_drv = { .scl_reset_length10 = true, .sanity_checks = false, .underflow_assert_delay_us = 0xFFFFFFFF, - .enable_tri_buf = false, + .enable_tri_buf = true, .enable_legacy_fast_update = true, .using_dml2 = false, }; -- cgit v1.2.3-70-g09d2 From 9c78dc956a51d05e4f3a5980aff0b083b8cf3a4d Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 11:23:40 -0600 Subject: drm/amd/display: Disable P010 Support of DCN 1.0 [WHY] DCN 1.0 is not ready for the P010 support. [HOW] 1. Set the P010 plan_cap of DCN 1.0 to be false. 2. 
Let the DM do the plan cap initialization of DCN 1.0. Reviewed-by: Rodrigo Siqueira Signed-off-by: Figo Wang Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index d08d10969251..81fa2ac781f9 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -513,7 +513,7 @@ static const struct dc_plane_cap plane_cap = { .argb8888 = true, .nv12 = true, .fp16 = true, - .p010 = true + .p010 = false }, .max_upscale_factor = { -- cgit v1.2.3-70-g09d2 From 7dc363e66258ae41823e0f360101a1e10ca6d4cb Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 11:48:40 -0600 Subject: drm/amd/display: Update DCN10 resource Update DCN10 to use legacy fast update and ensure that the MPCC count is the same as the pipe_count. Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index 81fa2ac781f9..563c5eec83ff 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -569,6 +569,7 @@ static const struct dc_debug_options debug_defaults_diags = { .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, .underflow_assert_delay_us = 0xFFFFFFFF, + .enable_legacy_fast_update = true, }; static void dcn10_dpp_destroy(struct dpp **dpp) @@ -1631,6 +1632,7 @@ static bool dcn10_resource_construct( /* valid pipe num */ pool->base.pipe_count = j; pool->base.timing_generator_count = j; + pool->base.mpcc_count = j; /* within dml lib, it is hard code to 4. If ASIC pipe is fused, * the value may be changed -- cgit v1.2.3-70-g09d2 From d76c0a23b557c6ebb3fac32548100d76a1e0ce23 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 11:49:50 -0600 Subject: drm/amd/display: Add fallback configuration when set DRR Set OTG/OPTC parameter to 0 if failed to set DRR. 
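As context for when the new fallback branch runs, here is a rough sketch of the disable path, assuming a caller that clears its drr_params before invoking the timing generator; struct drr_params and the set_drr hook are existing DC interfaces, but the surrounding caller code is illustrative only, not part of this patch:

	struct drr_params params = {0};	/* vertical_total_min/max == 0 => disable DRR */

	/*
	 * With vertical_total_max == 0 the else branch added below clears the
	 * min/max select and force-lock-on-event bits in OTG_V_TOTAL_CONTROL and
	 * programs vtotal min/max back to 0, instead of leaving stale
	 * manual-trigger state behind.
	 */
	if (tg->funcs->set_drr)
		tg->funcs->set_drr(tg, &params);
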
Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 52eab8fccb7f..c4f0e1951427 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -293,9 +293,16 @@ static void optc32_set_drr( } optc->funcs->set_vtotal_min_max(optc, params->vertical_total_min - 1, params->vertical_total_max - 1); - } + optc32_setup_manual_trigger(optc); + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); - optc32_setup_manual_trigger(optc); + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } } static struct timing_generator_funcs dcn32_tg_funcs = { -- cgit v1.2.3-70-g09d2 From bc87d666c05a13e6d4ae1ddce41fc43d2567b9a2 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 11:55:19 -0600 Subject: drm/amd/display: Add fallback configuration for set DRR in DCN10 Set OTG/OPTC parameters to 0 if something goes wrong on DCN10. Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c index f109a101d84f..5574bc628053 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c @@ -945,10 +945,19 @@ void optc1_set_drr( OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK_EN, 0, OTG_SET_V_TOTAL_MIN_MASK, 0); - } - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); + // Setup manual flow control for EOF via TRIG_A + optc->funcs->setup_manual_trigger(optc); + + } else { + REG_UPDATE_4(OTG_V_TOTAL_CONTROL, + OTG_SET_V_TOTAL_MIN_MASK, 0, + OTG_V_TOTAL_MIN_SEL, 0, + OTG_V_TOTAL_MAX_SEL, 0, + OTG_FORCE_LOCK_ON_EVENT, 0); + + optc->funcs->set_vtotal_min_max(optc, 0, 0); + } } void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max) -- cgit v1.2.3-70-g09d2 From 38caf642e547c34b27d0d5d0ce2659623edfe40c Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 26 Mar 2024 14:46:54 -0600 Subject: drm/amd/display: Change DPCD address range Change DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT with DP_PHY_REPEATER_128B132B_RATES. 
Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c index c5de6ed5bf58..a72c898b64fa 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -130,7 +130,7 @@ static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint3 * XXX: Do not allow any two address ranges in this array to overlap */ static const struct dpcd_address_range mandatory_dpcd_blocks[] = { - { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }}; + { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_128B132B_RATES }}; /* * extend addresses to read all mandatory blocks together -- cgit v1.2.3-70-g09d2 From 4135899209f9f1d9a90c46ed9b582e88dd5eae0b Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 10 Apr 2024 15:52:10 -0400 Subject: drm/amdkfd: Fix memory leak in create_process failure Fix memory leak due to a leaked mmget reference on an error handling code path that is triggered when attempting to create KFD processes while a GPU reset is in progress. Fixes: 0ab2d7532b05 ("drm/amdkfd: prepare per-process debug enable and disable") CC: Xiaogang Chen Signed-off-by: Felix Kuehling Tested-by: Harish Kasiviswanthan Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 717a60d7a4ea..b79986412cd8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) mutex_lock(&kfd_processes_mutex); if (kfd_is_locked()) { - mutex_unlock(&kfd_processes_mutex); pr_debug("KFD is locked! Cannot create process"); - return ERR_PTR(-EINVAL); + process = ERR_PTR(-EINVAL); + goto out; } /* A prior open of /dev/kfd could have already created the process. */ -- cgit v1.2.3-70-g09d2 From 5c786f1c5434b59c5abe1b087c6abd30f3747f6b Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 27 Mar 2024 09:41:41 -0600 Subject: drm/amd/display: Reorganize dwb header This commit makes some small adjustments in the dwb header. Acked-by: Hamza Mahfooz Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 30 ++++++++++++++++++----------- 1 file changed, 19 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h index 729ca0064e94..063efc8128a7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h @@ -147,9 +147,10 @@ struct dwb_caps { unsigned int support_ogam :1; unsigned int support_wbscl :1; unsigned int support_ocsc :1; - unsigned int support_stereo :1; + unsigned int support_stereo :1; + unsigned int support_4k_120p :1; } caps; - unsigned int reserved2[9]; /* Reserved for future use, MUST BE 0. */ + unsigned int reserved2[10]; /* Reserved for future use, MUST BE 0. 
*/ }; struct dwbc { @@ -166,8 +167,9 @@ struct dwbc { bool dwb_is_drc; int wb_src_plane_inst;/*hubp, mpcc, inst*/ uint32_t mask_id; - int otg_inst; - bool mvc_cfg; + int otg_inst; + bool mvc_cfg; + struct dc_dwb_params params; }; struct dwbc_funcs { @@ -192,6 +194,10 @@ struct dwbc_funcs { struct dwbc *dwbc, enum dwb_frame_capture_enable enable); + void (*dwb_set_scaler)( + struct dwbc *dwbc, + struct dc_dwb_params *params); + void (*set_stereo)( struct dwbc *dwbc, struct dwb_stereo_params *stereo_params); @@ -205,9 +211,11 @@ struct dwbc_funcs { struct dwbc *dwbc, struct dwb_warmup_params *warmup_params); - + bool (*dwb_get_mcifbuf_line)( + struct dwbc *dwbc, unsigned int *buf_idx, + unsigned int *cur_line, + unsigned int *over_run); #if defined(CONFIG_DRM_AMD_DC_FP) - void (*dwb_program_output_csc)( struct dwbc *dwbc, enum dc_color_space color_space, @@ -216,17 +224,17 @@ struct dwbc_funcs { bool (*dwb_ogam_set_output_transfer_func)( struct dwbc *dwbc, const struct dc_transfer_func *in_transfer_func_dwb_ogam); - +#endif //TODO: merge with output_transfer_func? bool (*dwb_ogam_set_input_transfer_func)( struct dwbc *dwbc, const struct dc_transfer_func *in_transfer_func_dwb_ogam); -#endif + + void (*get_drr_time_stamp)( + struct dwbc *dwbc, uint32_t *time_stamp); + bool (*get_dwb_status)( struct dwbc *dwbc); - void (*dwb_set_scaler)( - struct dwbc *dwbc, - struct dc_dwb_params *params); }; #endif -- cgit v1.2.3-70-g09d2 From b0f52f1807338dd77ca04c909bfa541403ea5b36 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 31 Mar 2024 18:48:38 -0400 Subject: drm/amd/display: 3.2.280 This version brings along the following: - DCN10 fixes - DCN316 fixes - DML2 fixes - DWB fixes - Expanded FAMS support - Misc code style fixes - ODM fixes - VSC SDP fixes Acked-by: Hamza Mahfooz Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 54534df73e83..db64b0061d70 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -53,7 +53,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.279" +#define DC_VER "3.2.280" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From 97d9e8cea224c31f109ff5c07a4b512da448e3db Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Wed, 27 Mar 2024 18:04:09 -0400 Subject: drm/amd/display: Modify power sequence Need to update the power sequence to help prevent potential issues like multi-display or multi-plane. 
Reviewed-by: Duncan Ma Acked-by: Rodrigo Siqueira Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/hwss/Makefile | 2 +- .../gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 25 ++- .../drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c | 182 +++++++++++++++++++++ .../drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h | 41 +++++ .../drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 9 +- 5 files changed, 247 insertions(+), 12 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c create mode 100644 drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile index 9e8e9de51a92..cf8aa23b4415 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile @@ -180,7 +180,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35) ############################################################################### -HWSS_DCN351 = dcn351_init.o +HWSS_DCN351 = dcn351_hwseq.o dcn351_init.o AMD_DAL_HWSS_DCN351 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn351/,$(HWSS_DCN351)) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile index b24ad27fe6ef..a4b3c1e99ec6 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile @@ -1,16 +1,27 @@ # -# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved +# Copyright (c) 2022-2024 Advanced Micro Devices, Inc. # -# All rights reserved. This notice is intended as a precaution against -# inadvertent publication and does not imply publication or any waiver -# of confidentiality. The year included in the foregoing notice is the -# year of creation of the work. +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: # -# Authors: AMD +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. # # Makefile for DCN351. -DCN351 = dcn351_init.o +DCN351 = dcn351_hwseq.o dcn351_init.o AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351)) diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c new file mode 100644 index 000000000000..93fe5b262a3d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c @@ -0,0 +1,182 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "core_types.h" +#include "resource.h" +#include "dcn351_hwseq.h" +#include "dcn35/dcn35_hwseq.h" + +#define DC_LOGGER_INIT(logger) \ + struct dal_logger *dc_logger = logger + +#define DC_LOGGER \ + dc_logger + +void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state) +{ + int i, j; + + dcn35_calc_blocks_to_gate(dc, context, update_state); + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (!update_state->pg_pipe_res_update[PG_HUBP][i] && + !update_state->pg_pipe_res_update[PG_DPP][i]) { + for (j = i - 1; j >= 0; j--) { + update_state->pg_pipe_res_update[PG_HUBP][j] = false; + update_state->pg_pipe_res_update[PG_DPP][j] = false; + } + + break; + } + } +} + +void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state) +{ + int i, j; + + dcn35_calc_blocks_to_ungate(dc, context, update_state); + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + for (j = i - 1; j >= 0; j--) { + update_state->pg_pipe_res_update[PG_HUBP][j] = true; + update_state->pg_pipe_res_update[PG_DPP][j] = true; + } + + break; + } + } +} + +/** + * dcn351_hw_block_power_down() - power down sequence + * + * The following sequence describes the ON-OFF (ONO) for power down: + * + * ONO Region 11, DCPG 19: dsc3 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 3, DCPG 25: hpo - SKIPPED. Should be kept on + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. 
will be pwr dwn after lono timer is armed + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn351_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + + if (!pg_cntl || dc->debug.ignore_pg) + return; + + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false); + } + + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false); + } + } + + // domain25 currently always on. + + /* this will need all the clients to unregister optc interrupts, let dmubfw handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false); + + // domain23 currently always on. + // domain22 currently always on. +} + +/** + * dcn351_hw_block_power_up() - power up sequence + * + * The following sequence describes the ON-OFF (ONO) for power up: + * + * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED + * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit + * ONO Region 2, DCPG 24: mpc opp optc dwb + * ONO Region 3, DCPG 25: hpo - SKIPPED + * ONO Region 4, DCPG 0: dchubp0, dpp0 + * ONO Region 5, DCPG 16: dsc0 + * ONO Region 6, DCPG 1: dchubp1, dpp1 + * ONO Region 7, DCPG 17: dsc1 + * ONO Region 8, DCPG 2: dchubp2, dpp2 + * ONO Region 9, DCPG 18: dsc2 + * ONO Region 10, DCPG 3: dchubp3, dpp3 + * ONO Region 11, DCPG 19: dsc3 + * + * @dc: Current DC state + * @update_state: update PG sequence states for HW block + */ +void dcn351_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state) +{ + int i = 0; + struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl; + + if (!pg_cntl || dc->debug.ignore_pg) + return; + + // domain22 currently always on. + // domain23 currently always on. + + /* this will need all the clients to unregister optc interrupts, let dmubfw handle this */ + if (pg_cntl->funcs->plane_otg_pg_control) + pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true); + + // domain25 currently always on. + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (update_state->pg_pipe_res_update[PG_HUBP][i] && + update_state->pg_pipe_res_update[PG_DPP][i]) { + if (pg_cntl->funcs->hubp_dpp_pg_control) + pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true); + } + + if (update_state->pg_pipe_res_update[PG_DSC][i]) { + if (pg_cntl->funcs->dsc_pg_control) + pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true); + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h new file mode 100644 index 000000000000..6d8f3bfb668e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2024 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN351_H__ +#define __DC_HWSS_DCN351_H__ + +#include "hw_sequencer_private.h" + +void dcn351_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state); +void dcn351_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, + struct pg_block_update *update_state); +void dcn351_hw_block_power_up(struct dc *dc, + struct pg_block_update *update_state); +void dcn351_hw_block_power_down(struct dc *dc, + struct pg_block_update *update_state); + +#endif /* __DC_HWSS_DCN351_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index c54f3518c947..c4944478ed91 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -32,6 +32,7 @@ #include "dcn31/dcn31_hwseq.h" #include "dcn32/dcn32_hwseq.h" #include "dcn35/dcn35_hwseq.h" +#include "dcn351/dcn351_hwseq.h" #include "dcn351_init.h" @@ -115,10 +116,10 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .update_visual_confirm_color = dcn10_update_visual_confirm_color, .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, .update_dsc_pg = dcn32_update_dsc_pg, - .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, - .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, - .hw_block_power_up = dcn35_hw_block_power_up, - .hw_block_power_down = dcn35_hw_block_power_down, + .calc_blocks_to_gate = dcn351_calc_blocks_to_gate, + .calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate, + .hw_block_power_up = dcn351_hw_block_power_up, + .hw_block_power_down = dcn351_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, }; -- cgit v1.2.3-70-g09d2 From ab956ed95b8bc4a65c913d7057075866d5fc3724 Mon Sep 17 00:00:00 2001 From: Chris Park Date: Tue, 2 Apr 2024 02:06:00 -0400 Subject: drm/amd/display: Add a function for checking tmds mode [Why] DVI is TMDS signal like HDMI but without audio. Current signal check does not correctly reflect DVI clock programming. [How] Define a new signal check for TMDS that includes DVI to HDMI TMDS programming. 
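A minimal usage sketch for the new helper: dc_is_tmds_signal() comes from this patch and dc_is_dp_signal() already exists in signal_types.h, while program_tmds_pixel_clock() and program_dp_pixel_clock() are made-up placeholders for whatever clock-source routine a caller already has:

	/* DVI single/dual link now takes the same TMDS path as HDMI. */
	if (dc_is_tmds_signal(pipe_ctx->stream->signal))
		program_tmds_pixel_clock(pipe_ctx);	/* placeholder */
	else if (dc_is_dp_signal(pipe_ctx->stream->signal))
		program_dp_pixel_clock(pipe_ctx);	/* placeholder */
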
Reviewed-by: Dillon Varone Acked-by: Rodrigo Siqueira Signed-off-by: Chris Park Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/signal_types.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 1b14b17a79c7..a10d6b988aab 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -118,6 +118,19 @@ static inline bool dc_is_dvi_signal(enum signal_type signal) } } +static inline bool dc_is_tmds_signal(enum signal_type signal) +{ + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + return true; + break; + default: + return false; + } +} + static inline bool dc_is_dvi_single_link_signal(enum signal_type signal) { return (signal == SIGNAL_TYPE_DVI_SINGLE_LINK); -- cgit v1.2.3-70-g09d2 From 63985d9adf8ca54e0ca73f79f5a64f6101b74523 Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Mon, 1 Apr 2024 16:45:10 -0400 Subject: drm/amd/display: Modify resource allocation logic To reduce the complexity of pipe resource allocation for different use-cases, now we search for any free pipe sequentially rather than from bottom up. Reviewed-by: Wenjing Liu Acked-by: Rodrigo Siqueira Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/resource/dcn32/dcn32_resource.c | 4 +-- .../amd/display/dc/resource/dcn32/dcn32_resource.h | 6 ++++ .../display/dc/resource/dcn351/dcn351_resource.c | 35 +++++++++++++++++++++- .../display/dc/resource/dcn351/dcn351_resource.h | 6 ++++ 4 files changed, 48 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 9aa39bd25be9..c16e915686fc 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -2547,7 +2547,7 @@ struct resource_pool *dcn32_create_resource_pool( * full update which delays the flip for 1 frame. If we use the original pipe * we don't have to toggle its power. So we can flip faster. 
*/ -static int find_optimal_free_pipe_as_secondary_dpp_pipe( +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( const struct resource_context *cur_res_ctx, struct resource_context *new_res_ctx, const struct resource_pool *pool, @@ -2730,7 +2730,7 @@ struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( return dcn32_acquire_idle_pipe_for_head_pipe_in_layer( new_ctx, pool, opp_head_pipe->stream, opp_head_pipe); - free_pipe_idx = find_optimal_free_pipe_as_secondary_dpp_pipe( + free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( &cur_ctx->res_ctx, &new_ctx->res_ctx, pool, opp_head_pipe); if (free_pipe_idx >= 0) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 286e20ad46ed..fee67fbab8e2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -137,6 +137,12 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context); bool dcn32_is_center_timing(struct pipe_ctx *pipe); bool dcn32_is_psr_capable(struct pipe_ctx *pipe); +int dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( + const struct resource_context *cur_res_ctx, + struct resource_context *new_res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *new_opp_head); + struct pipe_ctx *dcn32_acquire_free_pipe_as_secondary_dpp_pipe( const struct dc_state *cur_ctx, struct dc_state *new_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 8a57adb27264..cc1a44a890b5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1728,6 +1728,37 @@ static bool dcn351_validate_bandwidth(struct dc *dc, return out; } +struct pipe_ctx *dcn351_acquire_free_pipe_as_secondary_dpp_pipe( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe) +{ + int free_pipe_idx; + struct pipe_ctx *free_pipe; + + free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( + &cur_ctx->res_ctx, &new_ctx->res_ctx, + pool, opp_head_pipe); + if (free_pipe_idx >= 0) { + free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; + free_pipe->pipe_idx = free_pipe_idx; + free_pipe->stream = opp_head_pipe->stream; + free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg; + free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp; + + free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx]; + free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx]; + free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx]; + free_pipe->plane_res.mpcc_inst = + pool->dpps[free_pipe->pipe_idx]->inst; + } else { + ASSERT(opp_head_pipe); + free_pipe = NULL; + } + + return free_pipe; +} static struct resource_funcs dcn351_res_pool_funcs = { .destroy = dcn351_destroy_resource_pool, @@ -1740,7 +1771,8 @@ static struct resource_funcs dcn351_res_pool_funcs = { .calculate_wm_and_dlg = NULL, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn351_acquire_free_pipe_as_secondary_dpp_pipe, + .acquire_free_pipe_as_secondary_opp_head = dcn32_acquire_free_pipe_as_secondary_opp_head, 
.release_pipe = dcn20_release_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, @@ -2130,6 +2162,7 @@ static bool dcn351_resource_construct( dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ + dc->dml2_options.map_dc_pipes_with_callbacks = true; if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h index f3e045777a3d..e4553c5100f8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h @@ -20,4 +20,10 @@ struct resource_pool *dcn351_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); +struct pipe_ctx *dcn351_acquire_free_pipe_as_secondary_dpp_pipe( + const struct dc_state *cur_ctx, + struct dc_state *new_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *opp_head_pipe); + #endif /* _DCN351_RESOURCE_H_ */ -- cgit v1.2.3-70-g09d2 From d7fedf2656462c2c20d6d98446e1cdf1c0edc0aa Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Tue, 2 Apr 2024 16:03:08 -0400 Subject: drm/amd/display: limit the code change to ips enabled asic Limit the code change for ips enable to reduce the impact for now. Also exit_ips first before dc_power_down to avoid 0x9f. Reviewed-by: Chris Park Acked-by: Rodrigo Siqueira Signed-off-by: Charlene Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 145cdab92ca0..01c75b66e8f1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -5042,8 +5042,13 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src) void dc_power_down_on_boot(struct dc *dc) { if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW && - dc->hwss.power_down_on_boot) + dc->hwss.power_down_on_boot) { + + if (dc->caps.ips_support) + dc_exit_ips_for_hw_access(dc); + dc->hwss.power_down_on_boot(dc); + } } void dc_set_power_state( -- cgit v1.2.3-70-g09d2 From ef319dff5475a2d7b9b61c0fb051a764237cd959 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Mon, 1 Apr 2024 13:51:58 -0400 Subject: drm/amd/display: add support for chroma offset [Why] Adding support for chroma subsampling offset (cositing) in scaler calculations to adjust reference point where we determine post-scaling chroma value in YUV420 surfaces. [How] Add support for cositing options: NONE, LEFT and TOPLEFT Add debug option force_cositing and set default to TOPLEFT to maintain same behaviour as without offset support. 
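For background, chroma cositing describes where the subsampled chroma sample sits relative to its luma neighborhood: TOPLEFT is co-sited with the top-left luma sample, LEFT is horizontally co-sited but vertically centered, and NONE is centered in both directions. A rough sketch of the 4:2:0 offsets a scaler would fold into its initial chroma phase, in luma-pixel units; the enum values are from this patch, the offset variables are illustrative rather than actual DC fields, and float is used only for readability (DC itself uses fixed-point types):

	float h_offset, v_offset;	/* chroma sample position relative to the top-left luma sample */

	h_offset = (cositing == CHROMA_COSITING_NONE) ? 0.5f : 0.0f;
	v_offset = (cositing == CHROMA_COSITING_TOPLEFT) ? 0.0f : 0.5f;
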
Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Samson Tam Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 3 +++ drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 7 +++++++ 2 files changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index db64b0061d70..5cd86cfd6f73 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1003,6 +1003,7 @@ struct dc_debug_options { unsigned int static_screen_wait_frames; bool force_chroma_subsampling_1tap; bool disable_422_left_edge_pixel; + unsigned int force_cositing; }; struct gpu_info_soc_bounding_box_v1_0; @@ -1285,6 +1286,7 @@ struct dc_plane_state { struct tg_color visual_confirm_color; bool is_statically_allocated; + enum chroma_cositing cositing; }; struct dc_plane_info { @@ -1303,6 +1305,7 @@ struct dc_plane_info { int global_alpha_value; bool input_csc_enabled; int layer_index; + enum chroma_cositing cositing; }; #include "dc_stream.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 465e15f57f93..2ad7f60805f5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -738,6 +738,13 @@ enum scanning_type { SCANNING_TYPE_UNDEFINED }; +enum chroma_cositing { + CHROMA_COSITING_NONE, + CHROMA_COSITING_LEFT, + CHROMA_COSITING_TOPLEFT, + CHROMA_COSITING_COUNT +}; + struct dc_crtc_timing_flags { uint32_t INTERLACE :1; uint32_t HSYNC_POSITIVE_POLARITY :1; /* when set to 1, -- cgit v1.2.3-70-g09d2 From e730c585237eb11f61d3e6555bf47e41c72d0626 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Wed, 3 Apr 2024 10:41:29 -0400 Subject: drm/amd/display: Pass sequential ONO bit to DMCUB boot options [Why] IPS ono sequence ordering differs based on the ASIC. [How] Detect the ASIC ID revision and set the boot option accordingly. Feed it through the DCN35 DMUB functions. 
Reviewed-by: Sung joon Kim Acked-by: Rodrigo Siqueira Signed-off-by: Nicholas Kazlauskas Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 +++++++++ drivers/gpu/drm/amd/display/dmub/dmub_srv.h | 1 + drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c | 1 + 3 files changed, 11 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4d9a76446df8..75c78f6e2fde 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1230,6 +1230,15 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) break; } + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { + case IP_VERSION(3, 5, 0): + case IP_VERSION(3, 5, 1): + hw_params.ips_sequential_ono = adev->external_rev_id > 0x10; + break; + default: + break; + } + status = dmub_srv_hw_init(dmub_srv, &hw_params); if (status != DMUB_STATUS_OK) { DRM_ERROR("Error initializing DMUB HW: %d\n", status); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 662bdb0e5d3d..2fde1f043d50 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -297,6 +297,7 @@ struct dmub_srv_hw_params { bool dpia_hpd_int_enable_supported; bool disable_clock_gate; bool disallow_dispclk_dppclk_ds; + bool ips_sequential_ono; enum dmub_memory_access_type mem_access_type; enum dmub_ips_disable_type disable_ips; }; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 98afaecd3984..70e63aeb8f89 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -420,6 +420,7 @@ void dmub_dcn35_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu boot_options.bits.disable_clk_ds = params->disallow_dispclk_dppclk_ds; boot_options.bits.disable_clk_gate = params->disable_clock_gate; boot_options.bits.ips_disable = params->disable_ips; + boot_options.bits.ips_sequential_ono = params->ips_sequential_ono; REG_WRITE(DMCUB_SCRATCH14, boot_options.all); } -- cgit v1.2.3-70-g09d2 From 0a571e8657c40047e6602466abfcb6514a391041 Mon Sep 17 00:00:00 2001 From: Chaitanya Dhere Date: Thu, 4 Apr 2024 14:34:04 -0400 Subject: drm/amd/display: Fix incorrect pointer assignment [Why] Pointer initialization and assignment for dml2_options is not done correctly. While this works for some compilers, others give an error. [How] Modify dc_state_create code to correctly initialize the dml2_opt pointer and pass it to dml2_create. Also update the code with correct derefrence operations. 
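The pattern in isolation, with simplified stand-in names rather than the real dml2 types: initializing a local struct by value copies the whole configuration onto the stack and any later writes only touch the copy (and this form drew an error from at least one compiler here), whereas taking a pointer avoids the copy and makes the flag writes land in the dc-owned options:

	#include <stdbool.h>

	struct cfg { bool flag; };
	struct dc_like { struct cfg options; };

	void example(struct dc_like *dc)
	{
		struct cfg local = dc->options;	/* by-value copy of the whole struct */
		local.flag = true;		/* only the copy changes */

		struct cfg *p = &dc->options;	/* the fix: reference the original */
		p->flag = true;			/* dc->options.flag itself changes */
	}
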
Reviewed-by: Aurabindo Pillai Acked-by: Rodrigo Siqueira Signed-off-by: Chaitanya Dhere Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index bf889bdd3925..76bb05f4d6bf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -191,7 +191,7 @@ static void init_state(struct dc *dc, struct dc_state *state) struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { #ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_configuration_options dml2_opt = dc->dml2_options; + struct dml2_configuration_options *dml2_opt = &dc->dml2_options; #endif struct dc_state *state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -205,11 +205,11 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p #ifdef CONFIG_DRM_AMD_DC_FP if (dc->debug.using_dml2) { - dml2_opt.use_clock_dc_limits = false; - dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2); + dml2_opt->use_clock_dc_limits = false; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2); - dml2_opt.use_clock_dc_limits = true; - dml2_create(dc, &dml2_opt, &state->bw_ctx.dml2_dc_power_source); + dml2_opt->use_clock_dc_limits = true; + dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source); } #endif -- cgit v1.2.3-70-g09d2 From e9e4b3a05b017d031f58239a7ca458337d35ed9b Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Thu, 4 Apr 2024 15:03:58 -0400 Subject: drm/amd/display: Enable Z10 flag for IPS FSM [why] IPS FSM requires Z10 flag to be enabled to do save and restore the registers properly. [how] Enable Z10 and use the correct function to determine Z10 capability Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index cc1a44a890b5..b29d7d47552b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -758,7 +758,7 @@ static const struct dc_debug_options debug_defaults_drv = { //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions .enable_double_buffered_dsc_pg_support = true, .enable_dp_dig_pixel_rate_div_policy = 1, - .disable_z10 = true, + .disable_z10 = false, .ignore_pg = true, .psp_disabled_wa = true, .ips2_eval_delay_us = 2000, @@ -1722,7 +1722,7 @@ static bool dcn351_validate_bandwidth(struct dc *dc, return out; DC_FP_START(); - dcn351_decide_zstate_support(dc, context); + dcn35_decide_zstate_support(dc, context); DC_FP_END(); return out; -- cgit v1.2.3-70-g09d2 From cb5b29178eab8cc3cc34c508c87dca6ff4306417 Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Thu, 4 Apr 2024 13:00:02 -0400 Subject: drm/amd/display: Rework power sequence and resource allocation logic Rework part of the modifications made to the power sequence and resource allocation logic. 
Reviewed-by: Xi (Alex) Liu Acked-by: Rodrigo Siqueira Signed-off-by: Sung Joon Kim Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 8 ++--- .../display/dc/resource/dcn351/dcn351_resource.c | 36 +--------------------- .../display/dc/resource/dcn351/dcn351_resource.h | 6 ---- 3 files changed, 5 insertions(+), 45 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c index c4944478ed91..a53092cd619b 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c @@ -116,10 +116,10 @@ static const struct hw_sequencer_funcs dcn351_funcs = { .update_visual_confirm_color = dcn10_update_visual_confirm_color, .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations, .update_dsc_pg = dcn32_update_dsc_pg, - .calc_blocks_to_gate = dcn351_calc_blocks_to_gate, - .calc_blocks_to_ungate = dcn351_calc_blocks_to_ungate, - .hw_block_power_up = dcn351_hw_block_power_up, - .hw_block_power_down = dcn351_hw_block_power_down, + .calc_blocks_to_gate = dcn35_calc_blocks_to_gate, + .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate, + .hw_block_power_up = dcn35_hw_block_power_up, + .hw_block_power_down = dcn35_hw_block_power_down, .root_clock_control = dcn35_root_clock_control, }; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index b29d7d47552b..3acfbbac8538 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1728,38 +1728,6 @@ static bool dcn351_validate_bandwidth(struct dc *dc, return out; } -struct pipe_ctx *dcn351_acquire_free_pipe_as_secondary_dpp_pipe( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head_pipe) -{ - int free_pipe_idx; - struct pipe_ctx *free_pipe; - - free_pipe_idx = dcn32_find_optimal_free_pipe_as_secondary_dpp_pipe( - &cur_ctx->res_ctx, &new_ctx->res_ctx, - pool, opp_head_pipe); - if (free_pipe_idx >= 0) { - free_pipe = &new_ctx->res_ctx.pipe_ctx[free_pipe_idx]; - free_pipe->pipe_idx = free_pipe_idx; - free_pipe->stream = opp_head_pipe->stream; - free_pipe->stream_res.tg = opp_head_pipe->stream_res.tg; - free_pipe->stream_res.opp = opp_head_pipe->stream_res.opp; - - free_pipe->plane_res.hubp = pool->hubps[free_pipe->pipe_idx]; - free_pipe->plane_res.ipp = pool->ipps[free_pipe->pipe_idx]; - free_pipe->plane_res.dpp = pool->dpps[free_pipe->pipe_idx]; - free_pipe->plane_res.mpcc_inst = - pool->dpps[free_pipe->pipe_idx]->inst; - } else { - ASSERT(opp_head_pipe); - free_pipe = NULL; - } - - return free_pipe; -} - static struct resource_funcs dcn351_res_pool_funcs = { .destroy = dcn351_destroy_resource_pool, .link_enc_create = dcn35_link_encoder_create, @@ -1771,8 +1739,7 @@ static struct resource_funcs dcn351_res_pool_funcs = { .calculate_wm_and_dlg = NULL, .update_soc_for_wm_a = dcn31_update_soc_for_wm_a, .populate_dml_pipes = dcn351_populate_dml_pipes_from_context_fpu, - .acquire_free_pipe_as_secondary_dpp_pipe = dcn351_acquire_free_pipe_as_secondary_dpp_pipe, - .acquire_free_pipe_as_secondary_opp_head = dcn32_acquire_free_pipe_as_secondary_opp_head, + .acquire_free_pipe_as_secondary_dpp_pipe = dcn20_acquire_free_pipe_for_layer, .release_pipe = 
dcn20_release_pipe, .add_stream_to_ctx = dcn30_add_stream_to_ctx, .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, @@ -2162,7 +2129,6 @@ static bool dcn351_resource_construct( dc->dml2_options.max_segments_per_hubp = 24; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE;/*todo*/ - dc->dml2_options.map_dc_pipes_with_callbacks = true; if (dc->config.sdpif_request_limit_words_per_umc == 0) dc->config.sdpif_request_limit_words_per_umc = 16;/*todo*/ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h index e4553c5100f8..f3e045777a3d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h @@ -20,10 +20,4 @@ struct resource_pool *dcn351_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); -struct pipe_ctx *dcn351_acquire_free_pipe_as_secondary_dpp_pipe( - const struct dc_state *cur_ctx, - struct dc_state *new_ctx, - const struct resource_pool *pool, - const struct pipe_ctx *opp_head_pipe); - #endif /* _DCN351_RESOURCE_H_ */ -- cgit v1.2.3-70-g09d2 From 8b9130bae048a7854c6e7d3e2710d0e96e861d31 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 28 Mar 2024 13:19:58 -0400 Subject: drm/amdgpu/gfx11: properly handle regGRBM_GFX_CNTL in soft reset Need to take the srbm_mutex and while we are here, use the helper function soc21_grbm_select(); Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index ae6a0d19e247..5dbfef49dd5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4506,14 +4506,11 @@ static int gfx_v11_0_soft_reset(void *handle) gfx_v11_0_set_safe_mode(adev, 0); + mutex_lock(&adev->srbm_mutex); for (i = 0; i < adev->gfx.mec.num_mec; ++i) { for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) { - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); + soc21_grbm_select(adev, i, k, j, 0); WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2); WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1); @@ -4523,16 +4520,14 @@ static int gfx_v11_0_soft_reset(void *handle) for (i = 0; i < adev->gfx.me.num_me; ++i) { for (j = 0; j < adev->gfx.me.num_queue_per_pipe; j++) { for (k = 0; k < adev->gfx.me.num_pipe_per_me; k++) { - tmp = RREG32_SOC15(GC, 0, regGRBM_GFX_CNTL); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, MEID, i); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, QUEUEID, j); - tmp = REG_SET_FIELD(tmp, GRBM_GFX_CNTL, PIPEID, k); - WREG32_SOC15(GC, 0, regGRBM_GFX_CNTL, tmp); + soc21_grbm_select(adev, i, k, j, 0); WREG32_SOC15(GC, 0, regCP_GFX_HQD_DEQUEUE_REQUEST, 0x1); } } } + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); /* Try to acquire the gfx mutex before access to CP_VMID_RESET */ r = gfx_v11_0_request_gfx_index_mutex(adev, 1); -- cgit v1.2.3-70-g09d2 From 3c858cf65e9a2c99a7c2a5864603d1bc828df1be Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 12 Apr 2024 15:52:42 +0530 Subject: drm/amdgpu: add missing 
vbios version from devcoredump Add vbios version in the devcoredump along with formatting the information with proper alignment. Reviewed-by: Alex Deucher Signed-off-by: Sunil Khatri Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c index 1129e5e5fb42..64fe564b8036 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c @@ -188,10 +188,11 @@ static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev, adev->vpe.feature_version, adev->vpe.fw_version); drm_printf(p, "\nVBIOS Information\n"); - drm_printf(p, "name: %s\n", ctx->name); - drm_printf(p, "pn %s\n", ctx->vbios_pn); - drm_printf(p, "version: %s\n", ctx->vbios_ver_str); - drm_printf(p, "date: %s\n", ctx->date); + drm_printf(p, "vbios name : %s\n", ctx->name); + drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn); + drm_printf(p, "vbios version : %d\n", ctx->version); + drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str); + drm_printf(p, "vbios date : %s\n", ctx->date); } static ssize_t -- cgit v1.2.3-70-g09d2 From 604079b2cf4f0a08438badeae6f2863f90fca954 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 5 Apr 2024 23:43:02 -0400 Subject: drm/amd/display: Expand dmub_cmd operations Update dmub_cmd to manipulate SDP control in replay FSM, add command for panel_cntl, expand link rate enum, and increase the reserve byte. Acked-by: Rodrigo Siqueira Signed-off-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 53 ++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 944f14307517..e85fd3ac52c7 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -1614,7 +1614,7 @@ struct dmub_rb_cmd_idle_opt_dcn_restore { */ struct dmub_dcn_notify_idle_cntl_data { uint8_t driver_idle; - uint8_t pad[1]; + uint8_t reserved[59]; }; /** @@ -2335,6 +2335,11 @@ enum phy_link_rate { * UHBR10 - 20.0 Gbps/Lane */ PHY_RATE_2000 = 11, + + PHY_RATE_675 = 12, + /** + * Rate 12 - 6.75 Gbps/Lane + */ }; /** @@ -3062,6 +3067,11 @@ enum dmub_cmd_replay_type { * Set pseudo vtotal */ DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7, + /** + * Set adaptive sync sdp enabled + */ + DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP = 8, + }; /** @@ -3263,6 +3273,20 @@ struct dmub_cmd_replay_set_pseudo_vtotal { */ uint8_t pad; }; +struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data { + /** + * Panel Instance. + * Panel isntance to identify which replay_state to use + * Currently the support is only for 0 or 1 + */ + uint8_t panel_inst; + /** + * enabled: set adaptive sync sdp enabled + */ + uint8_t force_disabled; + + uint8_t pad[2]; +}; /** * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command. @@ -3366,6 +3390,20 @@ struct dmub_rb_cmd_replay_set_pseudo_vtotal { struct dmub_cmd_replay_set_pseudo_vtotal data; }; +/** + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ +struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp { + /** + * Command header. 
+ */ + struct dmub_cmd_header header; + /** + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data data; +}; + /** * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command. */ @@ -3421,6 +3459,11 @@ union dmub_replay_cmd_set { * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data. */ struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data; + /** + * Definition of DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command data. + */ + struct dmub_cmd_replay_disabled_adaptive_sync_sdp_data disabled_adaptive_sync_sdp_data; + }; /** @@ -4096,6 +4139,10 @@ enum dmub_cmd_panel_cntl_type { * Queries backlight info for the embedded panel. */ DMUB_CMD__PANEL_CNTL_QUERY_BACKLIGHT_INFO = 1, + /** + * Sets the PWM Freq as per user's requirement. + */ + DMUB_CMD__PANEL_DEBUG_PWM_FREQ = 2, }; /** @@ -4667,6 +4714,10 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command. */ struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal; + /** + * Definition of a DMUB_CMD__REPLAY_DISABLED_ADAPTIVE_SYNC_SDP command. + */ + struct dmub_rb_cmd_replay_disabled_adaptive_sync_sdp replay_disabled_adaptive_sync_sdp; /** * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. */ -- cgit v1.2.3-70-g09d2 From a6ebaca1fbfb1c1d4affb44d804ebb87f13320e5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 29 Mar 2024 18:03:03 -0400 Subject: Revert "drm/amd/display: fix USB-C flag update after enc10 feature init" This reverts commit b5abd7f983e14054593dc91d6df2aa5f8cc67652. This change breaks DSC on 4k monitors at 144Hz over USB-C. Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3254 Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher Cc: Muhammad Ahmed Cc: Tom Chung Cc: Charlene Liu Cc: Hamza Mahfooz Cc: Harry Wentland --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 8 +++----- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4 ++-- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index e224a028d68a..8a0460e86309 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -248,14 +248,12 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; - - enc10->base.features = *enc_features; if (enc10->base.connector.id == CONNECTOR_ID_USBC) enc10->base.features.flags.bits.DP_IS_USB_C = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; enc10->base.transmitter = init_data->transmitter; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index 87eab924ecaf..20f810a6646c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -183,6 +183,8 @@ void dcn35_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + 
enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; @@ -237,8 +239,6 @@ void dcn35_link_encoder_construct( } enc10->base.features.flags.bits.HDMI_6GB_EN = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; if (bp_funcs->get_connector_speed_cap_info) result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios, -- cgit v1.2.3-70-g09d2 From 54b822b3eac3084eb7a9b0699cf4e659b93d266b Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 2 Apr 2024 15:58:31 -0600 Subject: drm/amd/display: Use dce_version instead of chip_id The chip ID DEVICE_ID_NV_13FE is not meaningful and represents a legacy way of dealing with chip ID. This commit uses dc_version instead of chip_id and also DCN_VERSION_2_01 instead of DEVICE_ID_NV_13FE. Signed-off-by: Rodrigo Siqueira Reviewed-by: Leo Li Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index c1a5908b97c8..a2b4ff2cff16 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -272,7 +272,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } - if (asic_id.chip_id == DEVICE_ID_NV_13FE) { + if (ctx->dce_version == DCN_VERSION_2_01) { dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg); return &clk_mgr->base; } -- cgit v1.2.3-70-g09d2 From 9312f9d7308a247cab8a00685c17193d940e25e9 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 2 Apr 2024 16:40:25 -0600 Subject: drm/amd/display: Adjust headers Update headers by removing two unecessary headers and include a new one. 
Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 1 + drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 3 --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c | 1 - 3 files changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index bec252e1dd27..358a83b1114b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -29,6 +29,7 @@ #include "dcn20/dcn20_clk_mgr.h" #include "dce100/dce_clk_mgr.h" #include "dcn31/dcn31_clk_mgr.h" +#include "dcn32/dcn32_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index a2f48d46d199..744c335718a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -22,9 +22,6 @@ * Authors: AMD * */ - -#include - #include "resource.h" #include "dce_i2c.h" #include "dce_i2c_hw.h" diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 8ed7125d230d..425b830b88d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -29,7 +29,6 @@ #include "reg_helper.h" #include "hw_shared.h" #include "dc.h" -#include "core_types.h" #define DC_LOGGER \ enc1->base.ctx->logger -- cgit v1.2.3-70-g09d2 From 838a59cae36f2bbf4d444270ca407f9e24458517 Mon Sep 17 00:00:00 2001 From: Ethan Bitnun Date: Mon, 12 Feb 2024 18:02:33 -0500 Subject: drm/amd/display: Improve the log precision The previous assumption that there will be an optimize_bandwidth call following every prepare_bandwidth call was incorrect and caused small inaccuracies in logging, as some info was only updated in later prepare calls. Reviewed-by: Leo Li Signed-off-by: Ethan Bitnun Reviewed-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 358a83b1114b..7eecb3403f74 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -830,7 +830,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base, dmcu->funcs->set_psr_wait_loop(dmcu, clk_mgr_base->clks.dispclk_khz / 1000 / 7); - if (dc->config.enable_auto_dpm_test_logs && safe_to_lower) { + if (dc->config.enable_auto_dpm_test_logs) { dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context); } } -- cgit v1.2.3-70-g09d2 From 53ec5cc44b8e877454bde5522302252832715f8c Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Tue, 2 Apr 2024 17:22:42 -0600 Subject: drm/amd/display: Fix PSR command version passed [why] Driver was passing a wrong command version which to DMCUB which caused the DMCUB to treat it as 0, so it wouldn't support dual eDP and would override the panel index to 0 instead of choosing between 0/1. 
[how] Use DMUB_CMD_PSR_CONTROL_VERSION_1 instead of PSR_VERSION_1. Reviewed-by: Leo Li Signed-off-by: Mikita Lipski Reviewed-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 01c75b66e8f1..8eefba757da4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -3446,6 +3446,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc, if (srf_updates[i].surface->flip_immediate) continue; + update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count; memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects, sizeof(flip_addr->dirty_rects)); -- cgit v1.2.3-70-g09d2 From a50f6fddbaefc26ec2c9a401ea3e71fd5d25582d Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Tue, 2 Apr 2024 17:39:42 -0600 Subject: drm/amd/display: Group scl_data together in resource_build_scaling_params Move the scl_data.format to be close to other similar parts. Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2633e481234f..876b0e5eda95 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1500,9 +1500,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) return false; } - pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( - pipe_ctx->plane_state->format); - /* Timing borders are part of vactive that we are also supposed to skip in addition * to any stream dst offset. Since dm logic assumes dst is in addressable * space we need to add the left and top borders to dst offsets temporarily. 
@@ -1514,6 +1511,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) /* Calculate H and V active size */ pipe_ctx->plane_res.scl_data.h_active = odm_slice_rec.width; pipe_ctx->plane_res.scl_data.v_active = odm_slice_rec.height; + pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( + pipe_ctx->plane_state->format); /* depends on h_active */ calculate_recout(pipe_ctx); -- cgit v1.2.3-70-g09d2 From 7725605f3137a8880f6f0b6fc94801ebafdebae9 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 10:26:17 -0600 Subject: drm/amd/display: Replace int with unsigned int Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index ee6493a9a79c..5c7e4884cac2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -495,7 +495,7 @@ bool dc_stream_remove_writeback(struct dc *dc, struct dc_stream_state *stream, uint32_t dwb_pipe_inst) { - int i = 0, j = 0; + unsigned int i, j; if (stream == NULL) { dm_error("DC: dc_stream is NULL!\n"); return false; -- cgit v1.2.3-70-g09d2 From 71866b72cb5283aea87b6d3c297f431ca2ad556a Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 11:51:20 -0600 Subject: drm/amd/display: Update some comments to improve the code readability This commit updates some comments to be more precise and adds another small comment to some other parts to improve the code readability. Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 10 +++++----- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h | 2 +- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c | 5 +++++ 3 files changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 5cd86cfd6f73..9475dab39af5 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -309,12 +309,12 @@ struct dc_dcc_setting { unsigned int max_compressed_blk_size; unsigned int max_uncompressed_blk_size; bool independent_64b_blks; - //These bitfields to be used starting with DCN + //These bitfields to be used starting with DCN 3.0 struct { - uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case) - uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN - uint32_t dcc_256_128_128 : 1; //available starting with DCN - uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case) + uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case) + uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0 + uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0 + uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case) } dcc_controls; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index efa2adf4f83d..8da3084d933f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -147,7 +147,7 @@ 
uint32_t DCN_CUR1_TTU_CNTL1;\ uint32_t VMID_SETTINGS_0 - +/*shared with dcn3.x*/ #define DCN21_HUBP_REG_COMMON_VARIABLE_LIST \ DCN2_HUBP_REG_COMMON_VARIABLE_LIST; \ uint32_t FLIP_PARAMETERS_3;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c index 35dd4bac242a..cd2bfcc51276 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -77,6 +77,7 @@ static void hubp201_program_requestor(struct hubp *hubp, MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode, CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode); + /* no need to program PTE */ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0, CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size, MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size, @@ -99,6 +100,10 @@ static void hubp201_setup( struct _vcs_dpi_display_rq_regs_st *rq_regs, struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest) { + /* + * otg is locked when this func is called. Register are double buffered. + * disable the requestors is not needed + */ hubp2_vready_at_or_After_vsync(hubp, pipe_dest); hubp201_program_requestor(hubp, rq_regs); hubp201_program_deadline(hubp, dlg_attr, ttu_attr); -- cgit v1.2.3-70-g09d2 From 3854887b35e031c532dadc12cdd9e4b469304b5b Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 11:14:26 -0600 Subject: drm/amd/display: Remove unnecessary code This commit groups many parts of the code that are redundant or not used and drops all of them. Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 - drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 3 --- .../gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 6 ------ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h | 18 ------------------ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 2 -- 5 files changed, 30 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 9475dab39af5..110e2dc884b2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1006,7 +1006,6 @@ struct dc_debug_options { unsigned int force_cositing; }; -struct gpu_info_soc_bounding_box_v1_0; /* Generic structure that can be used to query properties of DC. More fields * can be added as required. 
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index 744c335718a7..ee601a6897a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -312,9 +312,6 @@ static bool setup_engine( /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, 1); - /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index d980e6bd6c66..b7a89c39f445 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -167,7 +167,6 @@ struct dcn10_link_enc_registers { uint32_t DIO_LINKD_CNTL; uint32_t DIO_LINKE_CNTL; uint32_t DIO_LINKF_CNTL; - uint32_t DIG_FIFO_CTRL0; uint32_t DIO_CLK_CNTL; uint32_t DIG_BE_CLK_CNTL; }; @@ -475,9 +474,6 @@ struct dcn10_link_enc_registers { type HPO_DP_ENC_SEL;\ type HPO_HDMI_ENC_SEL -#define DCN32_LINK_ENCODER_REG_FIELD_LIST(type) \ - type DIG_FIFO_OUTPUT_PIXEL_MODE - #define DCN35_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_BE_ENABLE;\ type DIG_RB_SWITCH_EN;\ @@ -512,7 +508,6 @@ struct dcn10_link_enc_shift { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN31_LINK_ENCODER_REG_FIELD_LIST(uint8_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint8_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint8_t); }; @@ -521,7 +516,6 @@ struct dcn10_link_enc_mask { DCN20_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN30_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN31_LINK_ENCODER_REG_FIELD_LIST(uint32_t); - DCN32_LINK_ENCODER_REG_FIELD_LIST(uint32_t); DCN35_LINK_ENCODER_REG_FIELD_LIST(uint32_t); }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h index 35a613bb08bf..08a57ea4591c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dccg.h @@ -29,13 +29,6 @@ #include "dcn20/dcn20_dccg.h" -#define DCCG_REG_LIST_DCN3AG() \ - DCCG_COMMON_REG_LIST_DCN_BASE(),\ - SR(PHYASYMCLK_CLOCK_CNTL),\ - SR(PHYBSYMCLK_CLOCK_CNTL),\ - SR(PHYCSYMCLK_CLOCK_CNTL) - - #define DCCG_REG_LIST_DCN30() \ DCCG_REG_LIST_DCN2(),\ DCCG_SRII(PIXEL_RATE_CNTL, OTG, 2),\ @@ -46,17 +39,6 @@ SR(PHYBSYMCLK_CLOCK_CNTL),\ SR(PHYCSYMCLK_CLOCK_CNTL) -#define DCCG_MASK_SH_LIST_DCN3AG(mask_sh) \ - DCCG_MASK_SH_LIST_DCN2_1(mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_EN, mask_sh),\ - DCCG_SF(HDMICHARCLK0_CLOCK_CNTL, HDMICHARCLK0_SRC_SEL, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_SRC_SEL, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, mask_sh),\ - DCCG_SF(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_SRC_SEL, mask_sh) - #define DCCG_MASK_SH_LIST_DCN3(mask_sh) \ DCCG_MASK_SH_LIST_DCN2(mask_sh),\ DCCG_SF(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, mask_sh),\ diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c index 1b9d9495f76d..fae98cf52020 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c @@ -251,9 +251,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = { .set_fc_enable = dwb3_set_fc_enable, .set_stereo = dwb3_set_stereo, .set_new_content = dwb3_set_new_content, - .dwb_program_output_csc = NULL, .dwb_ogam_set_input_transfer_func = dwb3_ogam_set_input_transfer_func, //TODO: rename - .dwb_set_scaler = NULL, }; void dcn30_dwbc_construct(struct dcn30_dwbc *dwbc30, -- cgit v1.2.3-70-g09d2 From f7c161a4c250d44eb96a1dcbf5bb3a8e3eba525b Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 11 Apr 2024 14:57:52 -0400 Subject: drm/amdgpu: increase mes submission timeout MES internally has a timeout allowance of 2 seconds. Increase driver timeout to 3 seconds to be safe. Signed-off-by: Jonathan Kim Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index e5230078a4cd..81833395324a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -111,7 +111,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring; unsigned long flags; - signed long timeout = adev->usec_timeout; + signed long timeout = 3000000; /* 3000 ms */ if (amdgpu_emu_mode) { timeout *= 100; -- cgit v1.2.3-70-g09d2 From 68c14b31ca309d619cbf60648b3a45683262112c Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Fri, 17 Aug 2018 11:21:30 -0400 Subject: drm/amd/display: Update FMT settings for 4:2:0 [Why] Update FMT_CONTROL settings based on HW spec [How] Update FMT settings for 4:2:0 Reviewed-by: Leo Li Signed-off-by: Eric Bernstein Reviewed-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 9 ++++++++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index 5838a11efd00..71e9288d60ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -168,6 +168,10 @@ static void opp1_set_pixel_encoding( case PIXEL_ENCODING_RGB: case PIXEL_ENCODING_YCBCR444: + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 0, + FMT_SUBSAMPLING_MODE, 0, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: @@ -177,7 +181,10 @@ static void opp1_set_pixel_encoding( FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 2, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 1); break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 2c0ecfa5a643..c87de68a509e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -79,6 +79,8 @@ OPP_SF(FMT0_FMT_CONTROL, 
FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_PIXEL_ENCODING, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_SUBSAMPLING_MODE, mask_sh), \ + OPP_SF(FMT0_FMT_CONTROL, FMT_CBCR_BIT_REDUCTION_BYPASS, mask_sh), \ OPP_SF(FMT0_FMT_CONTROL, FMT_STEREOSYNC_OVERRIDE, mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_R_SEED, FMT_RAND_R_SEED, mask_sh), \ OPP_SF(FMT0_FMT_DITHER_RAND_G_SEED, FMT_RAND_G_SEED, mask_sh), \ -- cgit v1.2.3-70-g09d2 From 35be2cbc9266f8f55703107ed233fa02d7ce16ac Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 14:25:22 -0600 Subject: drm/amd/display: Rework dcn10_stream_encoder header This commit remove some unused code and also rename one of the define. Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index c429590f1298..1b96972b9d0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -127,7 +127,6 @@ struct dcn10_stream_enc_registers { uint32_t AFMT_60958_1; uint32_t AFMT_60958_2; uint32_t DIG_FE_CNTL; - uint32_t DIG_FE_CNTL2; uint32_t DIG_FIFO_STATUS; uint32_t DP_MSE_RATE_CNTL; uint32_t DP_MSE_RATE_UPDATE; @@ -570,7 +569,7 @@ struct dcn10_stream_enc_registers { type DP_SEC_GSP11_ENABLE;\ type DP_SEC_GSP11_LINE_NUM -#define SE_REG_FIELD_LIST_DCN3_2(type) \ +#define SE_REG_FIELD_LIST_DCN3_1_COMMON(type) \ type DIG_FIFO_OUTPUT_PIXEL_MODE;\ type DP_PIXEL_PER_CYCLE_PROCESSING_MODE;\ type DIG_SYMCLK_FE_ON;\ @@ -599,7 +598,7 @@ struct dcn10_stream_encoder_shift { uint8_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint8_t); SE_REG_FIELD_LIST_DCN3_0(uint8_t); - SE_REG_FIELD_LIST_DCN3_2(uint8_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint8_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint8_t); }; @@ -608,7 +607,7 @@ struct dcn10_stream_encoder_mask { uint32_t HDMI_ACP_SEND; SE_REG_FIELD_LIST_DCN2_0(uint32_t); SE_REG_FIELD_LIST_DCN3_0(uint32_t); - SE_REG_FIELD_LIST_DCN3_2(uint32_t); + SE_REG_FIELD_LIST_DCN3_1_COMMON(uint32_t); SE_REG_FIELD_LIST_DCN3_5_COMMON(uint32_t); }; @@ -667,9 +666,6 @@ void enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc); -void enc1_stream_encoder_reset_fifo( - struct stream_encoder *enc); - void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc); -- cgit v1.2.3-70-g09d2 From 460f6e3950603cd3138847b1e2db5d878b0db62b Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 14:35:07 -0600 Subject: drm/amd/display: Move REG sequence from program ogam to idle before connect Fill ring buffer before offload. 
Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 16b5ff208d14..ea73473b970a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -395,9 +395,12 @@ static void mpc20_program_ogam_pwl( MPCC_OGAM_LUT_DATA, rgb[i].delta_green_reg); REG_SET(MPCC_OGAM_LUT_DATA[mpcc_id], 0, MPCC_OGAM_LUT_DATA, rgb[i].delta_blue_reg); - } + REG_SEQ_SUBMIT(); + PERF_TRACE(); + REG_SEQ_WAIT_DONE(); + PERF_TRACE(); } static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, @@ -501,11 +504,6 @@ void mpc2_assert_mpcc_idle_before_connect(struct mpc *mpc, int mpcc_id) ASSERT(!mpc_disabled); ASSERT(!mpc_idle); } - - REG_SEQ_SUBMIT(); - PERF_TRACE(); - REG_SEQ_WAIT_DONE(); - PERF_TRACE(); } static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst) -- cgit v1.2.3-70-g09d2 From e1f7aa2d54f81d825409d3a73400221ebfccdbef Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 14:41:10 -0600 Subject: drm/amd/display: Update DCN201 link encoder registers Add some missing registers expansion in the dcn201_link_encoder file. Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h index 8b95ef251332..be25e8dc0636 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h @@ -30,6 +30,10 @@ #define DPCS_DCN201_MASK_SH_LIST(mask_sh)\ DPCS_MASK_SH_LIST(mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, VCO_LD_VAL_OVRD_EN, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD, mask_sh),\ + LE_SF(DPCSSYS_CR0_RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, REF_LD_VAL_OVRD_EN, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\ @@ -44,7 +48,15 @@ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh) #define DPCS_DCN201_REG_LIST(id) \ - DPCS_DCN2_CMN_REG_LIST(id) + DPCS_DCN2_CMN_REG_LIST(id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE1_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE2_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_2, DPCSSYS_CR, id), \ + SRI_IX(RAWLANE3_DIG_PCS_XF_RX_OVRD_IN_3, DPCSSYS_CR, id) void dcn201_link_encoder_construct( struct dcn20_link_encoder *enc20, -- cgit v1.2.3-70-g09d2 From 251d7ff31192f210b12170b826213b3987e614f5 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 15:04:54 -0600 Subject: drm/amd/display: 
Add missing callback for init_watermarks in DCN 301 Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c index a046664e2031..c1959672df50 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_hubbub.c @@ -63,6 +63,7 @@ static const struct hubbub_funcs hubbub301_funcs = { .verify_allow_pstate_change_high = hubbub1_verify_allow_pstate_change_high, .force_wm_propagate_to_pipes = hubbub3_force_wm_propagate_to_pipes, .force_pstate_change_control = hubbub3_force_pstate_change_control, + .init_watermarks = hubbub3_init_watermarks, .hubbub_read_state = hubbub2_read_state, }; -- cgit v1.2.3-70-g09d2 From d4a5b420cc4dcd84af6d6b894b9b65202d83a0ab Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Wed, 3 Apr 2024 15:11:10 -0600 Subject: drm/amd/display: Add missing replay field Reviewed-by: Leo Li Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_types.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 614d7c27c759..0f66d00ef80f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -1050,6 +1050,8 @@ union replay_error_status { struct replay_config { /* Replay feature is supported */ bool replay_supported; + /* Replay caps support DPCD & EDID caps*/ + bool replay_cap_support; /* Power opt flags that are supported */ unsigned int replay_power_opt_supported; /* SMU optimization is supported */ -- cgit v1.2.3-70-g09d2 From 86842046726e5a8ea82d138c5bc0463d6505edbe Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 7 Apr 2024 20:47:29 -0400 Subject: drm/amd/display: 3.2.281 This version brings along following fixes: * Expand dmub_cmd operations. * Update DVI configuration. * Modify power sequence. * Enable Z10 flag for IPS. * Multiple code cleanups. 
Acked-by: Rodrigo Siqueira Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 110e2dc884b2..3c33c3bcbe2c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -53,7 +53,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.280" +#define DC_VER "3.2.281" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit v1.2.3-70-g09d2 From f23558627f2bb28df3b7f3d1d53b1e7f4bd1e250 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 12 Apr 2024 09:53:15 +0800 Subject: drm/amdgpu: add new aca smu callback func parse_error_code() add new aca smu callback parse_error_code{} to avoid specific asic check in amdgpu_aca.c file Signed-off-by: Yang Wang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c | 23 ++++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h | 1 + .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c | 13 ++++++++++++ 3 files changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c index c02634a124dd..c50202215f6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c @@ -753,23 +753,13 @@ int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info) static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank) { - int error_code; - - switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(13, 0, 6): - if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) { - error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); - return error_code & 0xff; - } - break; - default: - break; - } + struct amdgpu_aca *aca = &adev->aca; + const struct aca_smu_funcs *smu_funcs = aca->smu_funcs; - /* NOTE: the true error code is encoded in status.errorcode[0:7] */ - error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + if (!smu_funcs || !smu_funcs->parse_error_code) + return -EOPNOTSUPP; - return error_code & 0xff; + return smu_funcs->parse_error_code(adev, bank); } int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size) @@ -780,6 +770,9 @@ int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank return -EINVAL; error_code = aca_bank_get_error_code(adev, bank); + if (error_code < 0) + return error_code; + for (i = 0; i < size; i++) { if (err_codes[i] == error_code) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h index 3765843ea648..5ef6b745f222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h @@ -173,6 +173,7 @@ struct aca_smu_funcs { int (*set_debug_mode)(struct amdgpu_device *adev, bool enable); int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count); int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank); + int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank); }; struct amdgpu_aca { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index 16179e170874..bc1f3fe4d82a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -3118,12 +3118,25 @@ static int aca_smu_get_valid_aca_bank(struct amdgpu_device *adev, return 0; } +static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank *bank) +{ + int error_code; + + if (!(adev->flags & AMD_IS_APU) && adev->pm.fw_version >= 0x00555600) + error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]); + else + error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]); + + return error_code & 0xff; +} + static const struct aca_smu_funcs smu_v13_0_6_aca_smu_funcs = { .max_ue_bank_count = 12, .max_ce_bank_count = 12, .set_debug_mode = aca_smu_set_debug_mode, .get_valid_aca_count = aca_smu_get_valid_aca_count, .get_valid_aca_bank = aca_smu_get_valid_aca_bank, + .parse_error_code = aca_smu_parse_error_code, }; static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu, -- cgit v1.2.3-70-g09d2 From 98856136c485e586ab063f0b3780dfc0c78df780 Mon Sep 17 00:00:00 2001 From: xinhui pan Date: Thu, 11 Apr 2024 11:11:38 +0800 Subject: drm/amdgpu: validate the parameters of bo mapping operations more clearly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Verify the parameters of amdgpu_vm_bo_(map/replace_map/clearing_mappings) in one common place. Fixes: dc54d3d1744d ("drm/amdgpu: implement AMDGPU_VA_OP_CLEAR v2") Cc: stable@vger.kernel.org Reported-by: Vlad Stolyarov Suggested-by: Christian König Signed-off-by: xinhui pan Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72 ++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8af3f0fd3073..4e2391c83d7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1647,6 +1647,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, trace_amdgpu_vm_bo_map(bo_va, mapping); } +/* Validate operation parameters to prevent potential abuse */ +static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, + struct amdgpu_bo *bo, + uint64_t saddr, + uint64_t offset, + uint64_t size) +{ + uint64_t tmp, lpfn; + + if (saddr & AMDGPU_GPU_PAGE_MASK + || offset & AMDGPU_GPU_PAGE_MASK + || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; + + if (check_add_overflow(saddr, size, &tmp) + || check_add_overflow(offset, size, &tmp) + || size == 0 /* which also leads to end < begin */) + return -EINVAL; + + /* make sure object fit at this offset */ + if (bo && offset + size > amdgpu_bo_size(bo)) + return -EINVAL; + + /* Ensure last pfn not exceed max_pfn */ + lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; + if (lpfn >= adev->vm_manager.max_pfn) + return -EINVAL; + + return 0; +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -1673,21 +1704,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; + int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if 
((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); if (tmp) { @@ -1740,17 +1764,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t eaddr; int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; /* Allocate all the needed memory */ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); @@ -1764,7 +1780,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, } saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; mapping->start = saddr; mapping->last = eaddr; @@ -1851,10 +1867,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; LIST_HEAD(removed); uint64_t eaddr; + int r; + + r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size); + if (r) + return r; - eaddr = saddr + size - 1; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; /* Allocate all the needed memory */ before = kzalloc(sizeof(*before), GFP_KERNEL); -- cgit v1.2.3-70-g09d2 From 394ae0603a6764d36437a26c097ef549e0eee1ff Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 4 Apr 2024 16:25:40 +0200 Subject: drm/amdgpu: fix visible VRAM handling during faults MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we removed the hacky start code check we actually didn't took into account that *all* VRAM pages needs to be CPU accessible. Clean up the code and unify the handling into a single helper which checks if the whole resource is CPU accessible. The only place where a partial check would make sense is during eviction, but that is neglitible. 
Signed-off-by: Christian König Fixes: aed01a68047b ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2") Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher CC: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 22 +++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 22 ----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 61 +++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3 ++ 5 files changed, 53 insertions(+), 57 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0a4b09709cfb..ec888fc6ead8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -819,7 +819,7 @@ retry: p->bytes_moved += ctx.bytes_moved; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) p->bytes_moved_vis += ctx.bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 010b0cb7693c..2099159a693f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, return r; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - bo->tbo.resource->mem_type == TTM_PL_VRAM && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else @@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_resource *res = bo->tbo.resource; uint64_t size = amdgpu_bo_size(bo); struct drm_gem_object *obj; unsigned int domain; bool shared; /* Abort if the BO doesn't currently have a backing store */ - if (!bo->tbo.resource) + if (!res) return; obj = &bo->tbo.base; shared = drm_gem_object_is_shared_for_memory_stats(obj); - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + domain = amdgpu_mem_type_to_domain(res->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: stats->vram += size; - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) stats->visible_vram += size; if (shared) stats->vram_shared += size; @@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - if (bo->resource->mem_type != TTM_PL_VRAM) - return 0; - - if (amdgpu_bo_in_cpu_visible_vram(abo)) + if (amdgpu_res_cpu_visible(adev, bo->resource)) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* this should never happen */ if (bo->resource->mem_type == TTM_PL_VRAM && - !amdgpu_bo_in_cpu_visible_vram(abo)) + !amdgpu_res_cpu_visible(adev, bo->resource)) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); @@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, */ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 
struct dma_buf_attachment *attachment; struct dma_buf *dma_buf; const char *placement; @@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) if (dma_resv_trylock(bo->tbo.base.resv)) { unsigned int domain; + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) placement = "VRAM VISIBLE"; else placement = "VRAM"; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index be679c42b0b8..fa03d9e4874c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); } -/** - * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM - */ -static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct amdgpu_res_cursor cursor; - - if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) - return false; - - amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); - while (cursor.remaining) { - if (cursor.start < adev->gmc.visible_vram_size) - return true; - - amdgpu_res_next(&cursor, cursor.size); - } - - return false; -} - /** * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6417cb76ccd4..1d71729e3f6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - amdgpu_bo_in_cpu_visible_vram(abo)) { + amdgpu_res_cpu_visible(adev, bo->resource)) { /* Try evicting to the CPU inaccessible part of VRAM * first, but only set GTT as busy placement, so this @@ -403,40 +403,55 @@ error: return r; } -/* - * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy +/** + * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU + * @adev: amdgpu device + * @res: the resource to check * - * Called by amdgpu_bo_move() + * Returns: true if the full resource is CPU visible, false otherwise. 
*/ -static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_resource *mem) +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res) { - u64 mem_size = (u64)mem->size; struct amdgpu_res_cursor cursor; - u64 end; - if (mem->mem_type == TTM_PL_SYSTEM || - mem->mem_type == TTM_PL_TT) + if (!res) + return false; + + if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || + res->mem_type == AMDGPU_PL_PREEMPT) return true; - if (mem->mem_type != TTM_PL_VRAM) + + if (res->mem_type != TTM_PL_VRAM) return false; - amdgpu_res_first(mem, 0, mem_size, &cursor); - end = cursor.start + cursor.size; + amdgpu_res_first(res, 0, res->size, &cursor); while (cursor.remaining) { + if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) + return false; amdgpu_res_next(&cursor, cursor.size); + } - if (!cursor.remaining) - break; + return true; +} - /* ttm_resource_ioremap only supports contiguous memory */ - if (end != cursor.start) - return false; +/* + * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy + * + * Called by amdgpu_bo_move() + */ +static bool amdgpu_res_copyable(struct amdgpu_device *adev, + struct ttm_resource *mem) +{ + if (!amdgpu_res_cpu_visible(adev, mem)) + return false; - end = cursor.start + cursor.size; - } + /* ttm_resource_ioremap only supports contiguous memory */ + if (mem->mem_type == TTM_PL_VRAM && + !(mem->placement & TTM_PL_FLAG_CONTIGUOUS)) + return false; - return end <= adev->gmc.visible_vram_size; + return true; } /* @@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (r) { /* Check that all memory is CPU accessible */ - if (!amdgpu_mem_visible(adev, old_mem) || - !amdgpu_mem_visible(adev, new_mem)) { + if (!amdgpu_res_copyable(adev, old_mem) || + !amdgpu_res_copyable(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 65ec82141a8e..32cf6b6f6efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res); + int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, -- cgit v1.2.3-70-g09d2 From 959056982a9b46758e0582bc6724b6ef51012e91 Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Tue, 2 Apr 2024 17:21:01 +0800 Subject: drm/amdgpu: Fix discovery initialization failure during pci rescan MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Waiting for system ready to fix the discovery initialization failure issue. This failure usually occurs when dGPU is removed and then rescanned via command line. 
It's caused by following two errors: [1] vram size is 0 [2] wrong binary signature Signed-off-by: Ma Jun Acked-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 07c5fca06178..90735e966318 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -255,7 +255,6 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, uint64_t vram_size; u32 msg; int i, ret = 0; - int ip_discovery_ver = 0; /* It can take up to a second for IFWI init to complete on some dGPUs, * but generally it should be in the 60-100ms range. Normally this starts @@ -265,17 +264,13 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, * continue. */ - ip_discovery_ver = RREG32(mmIP_DISCOVERY_VERSION); - if ((dev_is_removable(&adev->pdev->dev)) || - (ip_discovery_ver == IP_DISCOVERY_V2) || - (ip_discovery_ver == IP_DISCOVERY_V4)) { - for (i = 0; i < 1000; i++) { - msg = RREG32(mmMP0_SMN_C2PMSG_33); - if (msg & 0x80000000) - break; - msleep(1); - } + for (i = 0; i < 1000; i++) { + msg = RREG32(mmMP0_SMN_C2PMSG_33); + if (msg & 0x80000000) + break; + usleep_range(1000, 1100); } + vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; if (vram_size) { -- cgit v1.2.3-70-g09d2 From d9fbd64e8e3176654df6d743fe59d5ee64d4d9e7 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Thu, 11 Apr 2024 16:08:22 -0600 Subject: Revert "drm/amd/display: Enable cur_rom_en even if cursor degamma is not enabled" This reverts commit 002001b092dd662ab79fcedcdd96c037cf0213d6. The original patch introduces cursor gamma issue to multiple Linux compositors. For this reason this commit reverts this change. 
Cc: Melissa Wen Cc: Harry Wentland Signed-off-by: Rodrigo Siqueira Reviewed-by: Melissa Wen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c index ed1e2f65f5b5..f8c0cee34080 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c @@ -395,7 +395,9 @@ void dpp3_set_cursor_attributes( if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { - cur_rom_en = 1; + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } } REG_UPDATE_3(CURSOR0_CONTROL, -- cgit v1.2.3-70-g09d2 From 12b8b4e68510a7e761cbbf16038ef4bb11812d13 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Mon, 15 Apr 2024 01:38:39 +0200 Subject: drm/amdgpu: Add missing space to DRM_WARN() message s/,please/, please/ Reviewed-by: Alex Deucher Signed-off-by: Thorsten Blum Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 170da4551ed5..48a2de92180a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1460,7 +1460,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) - DRM_WARN("System can't access extended configuration space,please check!!\n"); + DRM_WARN("System can't access extended configuration space, please check!!\n"); /* skip if the bios has already enabled large BAR */ if (adev->gmc.real_vram_size && -- cgit v1.2.3-70-g09d2 From 6c6acc5f33ab727cfb6fe88cf3d96ca9c67cac5b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 26 Feb 2024 10:03:57 +0800 Subject: drm/amdgpu: Load ipkeymgr drv for psp v14 while DBG_DRV is renamed to HAD_DRV for psp v14, part of its APIs/functionality is moved to a new component named Ipkeymgr_Drv. 
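As a side note, a minimal sketch of the optional-component pattern this change follows in psp_hw_start(): the new bootloader blob is submitted only when the SOS image actually carries it and the ASIC implements the hook, so older PSP versions are unaffected. The structure and function names below are hypothetical stand-ins, not the amdgpu API.

    /* Illustrative only: mirrors the "valid blob + implemented hook" guard. */
    struct fw_blob {
            unsigned int size_bytes;        /* 0 when absent from the SOS image */
    };

    struct psp_like {
            struct fw_blob ipkeymgr_drv;
            int (*load_ipkeymgr_drv)(struct psp_like *psp);
    };

    static int maybe_load_ipkeymgr(struct psp_like *psp)
    {
            if (!psp->ipkeymgr_drv.size_bytes)      /* blob not present */
                    return 0;
            if (!psp->load_ipkeymgr_drv)            /* ASIC has no such command */
                    return 0;
            return psp->load_ipkeymgr_drv(psp);     /* propagate load failures */
    }
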
Signed-off-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 8 +++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1 + drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 5 +++++ 4 files changed, 28 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index edae581059af..4bd4602d11b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -2265,6 +2265,15 @@ static int psp_hw_start(struct psp_context *psp) } } + if ((is_psp_fw_valid(psp->ipkeymgr_drv)) && + (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) { + ret = psp_bootloader_load_ipkeymgr_drv(psp); + if (ret) { + dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n"); + return ret; + } + } + if ((is_psp_fw_valid(psp->sos)) && (psp->funcs->bootloader_load_sos != NULL)) { ret = psp_bootloader_load_sos(psp); @@ -3280,6 +3289,12 @@ static int parse_sos_bin_descriptor(struct psp_context *psp, psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes); psp->ras_drv.start_addr = ucode_start_addr; break; + case PSP_FW_TYPE_PSP_IPKEYMGR_DRV: + psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version); + psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version); + psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes); + psp->ipkeymgr_drv.start_addr = ucode_start_addr; + break; default: dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index ee16f134ae92..66b3f88fbecd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -74,7 +74,8 @@ enum psp_bootloader_cmd { PSP_BL__LOAD_SOCDRV = 0xB0000, PSP_BL__LOAD_DBGDRV = 0xC0000, PSP_BL__LOAD_INTFDRV = 0xD0000, - PSP_BL__LOAD_RASDRV = 0xE0000, + PSP_BL__LOAD_RASDRV = 0xE0000, + PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000, PSP_BL__DRAM_LONG_TRAIN = 0x100000, PSP_BL__DRAM_SHORT_TRAIN = 0x200000, PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000, @@ -117,6 +118,7 @@ struct psp_funcs { int (*bootloader_load_intf_drv)(struct psp_context *psp); int (*bootloader_load_dbg_drv)(struct psp_context *psp); int (*bootloader_load_ras_drv)(struct psp_context *psp); + int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp); int (*bootloader_load_sos)(struct psp_context *psp); int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type); @@ -336,6 +338,7 @@ struct psp_context { struct psp_bin_desc intf_drv; struct psp_bin_desc dbg_drv; struct psp_bin_desc ras_drv; + struct psp_bin_desc ipkeymgr_drv; /* tmr buffer */ struct amdgpu_bo *tmr_bo; @@ -424,6 +427,9 @@ struct amdgpu_psp_funcs { #define psp_bootloader_load_ras_drv(psp) \ ((psp)->funcs->bootloader_load_ras_drv ? \ (psp)->funcs->bootloader_load_ras_drv((psp)) : 0) +#define psp_bootloader_load_ipkeymgr_drv(psp) \ + ((psp)->funcs->bootloader_load_ipkeymgr_drv ? \ + (psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0) #define psp_bootloader_load_sos(psp) \ ((psp)->funcs->bootloader_load_sos ? 
(psp)->funcs->bootloader_load_sos((psp)) : 0) #define psp_smu_reload_quirk(psp) \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 619445760037..105d4de0613a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -125,6 +125,7 @@ enum psp_fw_type { PSP_FW_TYPE_PSP_INTF_DRV, PSP_FW_TYPE_PSP_DBG_DRV, PSP_FW_TYPE_PSP_RAS_DRV, + PSP_FW_TYPE_PSP_IPKEYMGR_DRV, PSP_FW_TYPE_MAX_INDEX, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 78a95f8f370b..241d5ff2ef3c 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -177,6 +177,10 @@ static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) return psp_v14_0_bootloader_load_component(psp, &psp->ras_drv, PSP_BL__LOAD_RASDRV); } +static int psp_v14_0_bootloader_load_ipkeymgr_drv(struct psp_context *psp) +{ + return psp_v14_0_bootloader_load_component(psp, &psp->ipkeymgr_drv, PSP_BL__LOAD_IPKEYMGRDRV); +} static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) { @@ -653,6 +657,7 @@ static const struct psp_funcs psp_v14_0_funcs = { .bootloader_load_intf_drv = psp_v14_0_bootloader_load_intf_drv, .bootloader_load_dbg_drv = psp_v14_0_bootloader_load_dbg_drv, .bootloader_load_ras_drv = psp_v14_0_bootloader_load_ras_drv, + .bootloader_load_ipkeymgr_drv = psp_v14_0_bootloader_load_ipkeymgr_drv, .bootloader_load_sos = psp_v14_0_bootloader_load_sos, .ring_create = psp_v14_0_ring_create, .ring_stop = psp_v14_0_ring_stop, -- cgit v1.2.3-70-g09d2 From 1347853271ed3ec85dd42586fa31f746664504e7 Mon Sep 17 00:00:00 2001 From: Ma Jun Date: Wed, 27 Mar 2024 15:18:40 +0800 Subject: drm/amdgpu: refactoring the runtime pm mode detection code refactor the code of runtime pm mode detection to support amdgpu_runtime_pm =2 and 1 two cases Signed-off-by: Ma Jun Reviewed-by: Yang Wang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 75 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 48 +------------------ 3 files changed, 77 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 65c17c59c152..e0d7f4ee7e16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1409,6 +1409,7 @@ bool amdgpu_device_supports_px(struct drm_device *dev); bool amdgpu_device_supports_boco(struct drm_device *dev); bool amdgpu_device_supports_smart_shift(struct drm_device *dev); int amdgpu_device_supports_baco(struct drm_device *dev); +void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); int amdgpu_device_baco_enter(struct drm_device *dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 48a2de92180a..1b2e177bc2d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -350,6 +350,81 @@ int amdgpu_device_supports_baco(struct drm_device *dev) return amdgpu_asic_supports_baco(adev); } +void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev) +{ + struct drm_device *dev; + int bamaco_support; + + dev = adev_to_drm(adev); + + adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; + bamaco_support = 
amdgpu_device_supports_baco(dev); + + switch (amdgpu_runtime_pm) { + case 2: + if (bamaco_support & MACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; + dev_info(adev->dev, "Forcing BAMACO for runtime pm\n"); + } else if (bamaco_support == BACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + dev_info(adev->dev, "Requested mode BAMACO not available,fallback to use BACO\n"); + } + break; + case 1: + if (bamaco_support & BACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + dev_info(adev->dev, "Forcing BACO for runtime pm\n"); + } + break; + case -1: + case -2: + if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */ + adev->pm.rpm_mode = AMDGPU_RUNPM_PX; + dev_info(adev->dev, "Using ATPX for runtime pm\n"); + } else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */ + adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; + dev_info(adev->dev, "Using BOCO for runtime pm\n"); + } else { + if (!bamaco_support) + goto no_runtime_pm; + + switch (adev->asic_type) { + case CHIP_VEGA20: + case CHIP_ARCTURUS: + /* BACO are not supported on vega20 and arctrus */ + break; + case CHIP_VEGA10: + /* enable BACO as runpm mode if noretry=0 */ + if (!adev->gmc.noretry) + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + break; + default: + /* enable BACO as runpm mode on CI+ */ + adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; + break; + } + + if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { + if (bamaco_support & MACO_SUPPORT) { + adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; + dev_info(adev->dev, "Using BAMACO for runtime pm\n"); + } else { + dev_info(adev->dev, "Using BACO for runtime pm\n"); + } + } + } + break; + case 0: + dev_info(adev->dev, "runtime pm is manually disabled\n"); + break; + default: + break; + } + +no_runtime_pm: + if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) + dev_info(adev->dev, "Runtime PM not available\n"); +} /** * amdgpu_device_supports_smart_shift - Is the device dGPU with * smart shift support diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 55eb4e4eb8f2..a0ea6fe8d060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -133,7 +133,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { struct drm_device *dev; - int bamaco_support = 0; int r, acpi_status; dev = adev_to_drm(adev); @@ -150,52 +149,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) goto out; } - adev->pm.rpm_mode = AMDGPU_RUNPM_NONE; - if (amdgpu_device_supports_px(dev) && - (amdgpu_runtime_pm != 0)) { /* enable PX as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_PX; - dev_info(adev->dev, "Using ATPX for runtime pm\n"); - } else if (amdgpu_device_supports_boco(dev) && - (amdgpu_runtime_pm != 0)) { /* enable boco as runtime mode */ - adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO; - dev_info(adev->dev, "Using BOCO for runtime pm\n"); - } else if (amdgpu_runtime_pm != 0) { - bamaco_support = amdgpu_device_supports_baco(dev); - - if (!bamaco_support) - goto no_runtime_pm; - - switch (adev->asic_type) { - case CHIP_VEGA20: - case CHIP_ARCTURUS: - /* enable BACO as runpm mode if runpm=1 */ - if (amdgpu_runtime_pm > 0) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - case CHIP_VEGA10: - /* enable BACO as runpm mode if noretry=0 */ - if (!adev->gmc.noretry) - adev->pm.rpm_mode = AMDGPU_RUNPM_BACO; - break; - default: - /* enable BACO as runpm mode on CI+ */ - adev->pm.rpm_mode = 
AMDGPU_RUNPM_BACO; - break; - } - - if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) { - if (bamaco_support & MACO_SUPPORT) { - adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO; - dev_info(adev->dev, "Using BAMACO for runtime pm\n"); - } else { - dev_info(adev->dev, "Using BACO for runtime pm\n"); - } - } - } - -no_runtime_pm: - if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) - dev_info(adev->dev, "NO pm mode for runtime pm\n"); + amdgpu_device_detect_runtime_pm_mode(adev); /* Call ACPI methods: require modeset init * but failure is not fatal -- cgit v1.2.3-70-g09d2 From 577cbed31818361cefb642eeeb558b8755ccbe2c Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 26 Feb 2024 10:14:53 +0800 Subject: drm/amdgpu: rename DBG_DRV to HAD_DRV for psp v14 Add a psp bl command enum for HAD_DRV. Signed-off-by: Hawking Zhang Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 1 + drivers/gpu/drm/amd/amdgpu/psp_v14_0.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index 66b3f88fbecd..3635303e6548 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -73,6 +73,7 @@ enum psp_bootloader_cmd { PSP_BL__LOAD_KEY_DATABASE = 0x80000, PSP_BL__LOAD_SOCDRV = 0xB0000, PSP_BL__LOAD_DBGDRV = 0xC0000, + PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV, PSP_BL__LOAD_INTFDRV = 0xD0000, PSP_BL__LOAD_RASDRV = 0xE0000, PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 241d5ff2ef3c..f08a32c18694 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -169,7 +169,8 @@ static int psp_v14_0_bootloader_load_intf_drv(struct psp_context *psp) static int psp_v14_0_bootloader_load_dbg_drv(struct psp_context *psp) { - return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_DBGDRV); + /* dbg_drv was renamed to had_drv in psp v14 */ + return psp_v14_0_bootloader_load_component(psp, &psp->dbg_drv, PSP_BL__LOAD_HADDRV); } static int psp_v14_0_bootloader_load_ras_drv(struct psp_context *psp) -- cgit v1.2.3-70-g09d2 From fa62c03917732efd65b652ee0c97632913367138 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Wed, 26 Apr 2023 14:02:25 +0800 Subject: drm/amd/swsmu: add smu14 ip support Add initial swSMU support for smu 14 series ASIC. 
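For reference, a minimal sketch of the register-offset selection this change introduces: MP1 14.0.0 keeps the legacy firmware-flags SMN offset while newer SMU 14 parts use the relocated one, so the offset is chosen from the MP1 IP version before polling. The offsets are taken from the hunk below; smu14_fw_flags_offset() and its boolean parameter are illustrative stand-ins for the amdgpu_ip_version() check in the driver.

    #include <stdint.h>

    #define SMN_MP1_FIRMWARE_FLAGS_14_0_0  0x3010028u  /* legacy offset, MP1 14.0.0 */
    #define SMN_MP1_FIRMWARE_FLAGS         0x3010024u  /* offset for newer SMU 14 parts */

    /* Illustrative: pick the firmware-flags offset by MP1 IP version. */
    static uint32_t smu14_fw_flags_offset(int is_mp1_14_0_0)
    {
            return is_mp1_14_0_0 ? SMN_MP1_FIRMWARE_FLAGS_14_0_0
                                 : SMN_MP1_FIRMWARE_FLAGS;
    }
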
v2: rebase (Alex) Signed-off-by: Kenneth Feng Signed-off-by: Likun Gao Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 3 +- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 77 +++++++++++++++++--------- 2 files changed, 54 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 6cdfee5052d9..be5b24d3dabd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -39,7 +39,8 @@ #define MP1_SRAM 0x03c00004 /* address block */ -#define smnMP1_FIRMWARE_FLAGS 0x3010028 +#define smnMP1_FIRMWARE_FLAGS_14_0_0 0x3010028 +#define smnMP1_FIRMWARE_FLAGS 0x3010024 #define smnMP1_PUB_CTRL 0x3010d10 #define MAX_DPM_LEVELS 16 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 2c3517397b14..33fc0331fac2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -38,8 +38,13 @@ #include "amdgpu_ras.h" #include "smu_cmn.h" -#include "asic_reg/mp/mp_14_0_0_offset.h" -#include "asic_reg/mp/mp_14_0_0_sh_mask.h" +#include "asic_reg/mp/mp_14_0_2_offset.h" +#include "asic_reg/mp/mp_14_0_2_sh_mask.h" + +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0 0x0341 +#define regMP1_SMN_IH_SW_INT_mp1_14_0_0_BASE_IDX 0 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0 0x0342 +#define regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0_BASE_IDX 0 /* * DO NOT use these for err/warn/info/debug messages. @@ -52,6 +57,7 @@ #undef pr_debug MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 @@ -59,7 +65,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; char fw_name[30]; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -106,7 +112,6 @@ void smu_v14_0_fini_microcode(struct smu_context *smu) int smu_v14_0_load_microcode(struct smu_context *smu) { -#if 0 struct amdgpu_device *adev = smu->adev; const uint32_t *src; const struct smc_firmware_header_v1_0 *hdr; @@ -131,8 +136,12 @@ int smu_v14_0_load_microcode(struct smu_context *smu) 1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK); for (i = 0; i < adev->usec_timeout; i++) { - mp1_fw_flags = RREG32_PCIE(MP1_Public | - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) break; @@ -142,9 +151,7 @@ int smu_v14_0_load_microcode(struct smu_context *smu) if (i == adev->usec_timeout) return -ETIME; -#endif return 0; - } int smu_v14_0_init_pptable_microcode(struct smu_context *smu) @@ -198,7 +205,11 @@ int smu_v14_0_check_fw_status(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; - mp1_fw_flags = RREG32_PCIE(MP1_Public | + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) + mp1_fw_flags = RREG32_PCIE(MP1_Public | + (smnMP1_FIRMWARE_FLAGS_14_0_0 & 0xffffffff)); + else + mp1_fw_flags = RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); if 
((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> @@ -227,16 +238,15 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) adev->pm.fw_version = smu_version; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): - smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; - break; case IP_VERSION(14, 0, 0): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0; break; case IP_VERSION(14, 0, 1): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; break; - + case IP_VERSION(14, 0, 2): + smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; + break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", amdgpu_ip_version(adev, MP1_HWIP, 0)); @@ -738,9 +748,9 @@ int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) struct amdgpu_device *adev = smu->adev; switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { - case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 2): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -841,9 +851,15 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, // TODO /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; case AMDGPU_IRQ_STATE_ENABLE: @@ -851,14 +867,25 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, // TODO /* For MP1 SW irqs */ - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); - - val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); - val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); - WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0)) { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_mp1_14_0_0, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL_mp1_14_0_0, val); + } else { + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); + + val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); + val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); + WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); + } break; default: -- cgit v1.2.3-70-g09d2 From 1dde20aa39541c3aab61c046d83fd1b64af95ec5 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Wed, 26 Apr 2023 14:12:01 +0800 Subject: drm/amd/swsmu: add smu14 driver if file 
Add initial smu14 driver if file v2: squash in updates (Alex) v3: squash in updates (Alex) v4: squash in updates (Alex) Signed-off-by: Kenneth Feng Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- .../pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h | 1836 ++++++++++++++++++++ 1 file changed, 1836 insertions(+) create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h new file mode 100644 index 000000000000..97a29b80fb13 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h @@ -0,0 +1,1836 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU14_DRIVER_IF_V14_0_H +#define SMU14_DRIVER_IF_V14_0_H + +//Increment this version if SkuTable_t or BoardTable_t change +#define PPTABLE_VERSION 0x18 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_DPPCLK_DPM_LEVELS 8 +#define NUM_DPREFCLK_DPM_LEVELS 8 +#define NUM_DCFCLK_DPM_LEVELS 8 +#define NUM_DTBCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 6 +#define NUM_LINK_LEVELS 3 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_OD_FAN_MAX_POINTS 6 + +// Feature Control Defines +#define FEATURE_FW_DATA_READ_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT 2 +#define FEATURE_DPM_UCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_SOCCLK_BIT 5 +#define FEATURE_DPM_LINK_BIT 6 +#define FEATURE_DPM_DCN_BIT 7 +#define FEATURE_VMEMP_SCALING_BIT 8 +#define FEATURE_VDDIO_MEM_SCALING_BIT 9 +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_FCLK_BIT 12 +#define FEATURE_DS_LCLK_BIT 13 +#define FEATURE_DS_DCFCLK_BIT 14 +#define FEATURE_DS_UCLK_BIT 15 +#define FEATURE_GFX_ULV_BIT 16 +#define FEATURE_FW_DSTATE_BIT 17 +#define FEATURE_GFXOFF_BIT 18 +#define FEATURE_BACO_BIT 19 +#define FEATURE_MM_DPM_BIT 20 +#define FEATURE_SOC_MPCLK_DS_BIT 21 +#define FEATURE_BACO_MPCLK_DS_BIT 22 +#define FEATURE_THROTTLERS_BIT 23 +#define FEATURE_SMARTSHIFT_BIT 24 +#define FEATURE_GTHR_BIT 25 +#define FEATURE_ACDC_BIT 26 +#define FEATURE_VR0HOT_BIT 27 +#define FEATURE_FW_CTF_BIT 28 +#define FEATURE_FAN_CONTROL_BIT 29 +#define FEATURE_GFX_DCS_BIT 30 +#define FEATURE_GFX_READ_MARGIN_BIT 31 +#define FEATURE_LED_DISPLAY_BIT 32 +#define FEATURE_GFXCLK_SPREAD_SPECTRUM_BIT 33 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 34 +#define FEATURE_OPTIMIZED_VMIN_BIT 35 +#define FEATURE_GFX_IMU_BIT 36 +#define FEATURE_BOOT_TIME_CAL_BIT 37 +#define FEATURE_GFX_PCC_DFLL_BIT 38 +#define FEATURE_SOC_CG_BIT 39 +#define FEATURE_DF_CSTATE_BIT 40 +#define FEATURE_GFX_EDC_BIT 41 +#define FEATURE_BOOT_POWER_OPT_BIT 42 +#define FEATURE_CLOCK_POWER_DOWN_BYPASS_BIT 43 +#define FEATURE_DS_VCN_BIT 44 +#define FEATURE_BACO_CG_BIT 45 +#define FEATURE_MEM_TEMP_READ_BIT 46 +#define FEATURE_ATHUB_MMHUB_PG_BIT 47 +#define FEATURE_SOC_PCC_BIT 48 +#define FEATURE_EDC_PWRBRK_BIT 49 +#define FEATURE_SOC_EDC_XVMIN_BIT 50 +#define FEATURE_GFX_PSM_DIDT_BIT 51 +#define FEATURE_APT_ALL_ENABLE_BIT 52 +#define FEATURE_APT_SQ_THROTTLE_BIT 53 +#define FEATURE_APT_PF_DCS_BIT 54 +#define FEATURE_GFX_EDC_XVMIN_BIT 55 +#define FEATURE_GFX_DIDT_XVMIN_BIT 56 +#define FEATURE_FAN_ABNORMAL_BIT 57 +#define FEATURE_CLOCK_STRETCH_COMPENSATOR 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 +#define NUM_FEATURES 64 + +#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL +#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \ + (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \ + (1 << FEATURE_DPM_UCLK_BIT) | \ + (1 << FEATURE_DPM_FCLK_BIT) | \ + (1 << FEATURE_DPM_SOCCLK_BIT) | \ + (1 << FEATURE_DPM_LINK_BIT) | \ + (1 << FEATURE_DPM_DCN_BIT) | \ + (1 << FEATURE_DS_GFXCLK_BIT) | \ + (1 << FEATURE_DS_SOCCLK_BIT) | \ + (1 << FEATURE_DS_FCLK_BIT) | \ + (1 << FEATURE_DS_LCLK_BIT) | \ + (1 << FEATURE_DS_DCFCLK_BIT) | \ + (1 << FEATURE_DS_UCLK_BIT) | \ + (1ULL << FEATURE_DS_VCN_BIT) + + +//For use with 
feature control messages +typedef enum { + FEATURE_PWR_ALL, + FEATURE_PWR_S5, + FEATURE_PWR_BACO, + FEATURE_PWR_SOC, + FEATURE_PWR_GFX, + FEATURE_PWR_DOMAIN_COUNT, +} FEATURE_PWR_DOMAIN_e; + +//For use with feature control + BTC save restore +typedef enum { + FEATURE_BTC_NOP, + FEATURE_BTC_SAVE, + FEATURE_BTC_RESTORE, + FEATURE_BTC_COUNT, +} FEATURE_BTC_e; + +// Debug Overrides Bitmask +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000001 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_DCN_FCLK 0x00000002 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_MP0_FCLK 0x00000004 +#define DEBUG_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCFCLK 0x00000008 +#define DEBUG_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00000010 +#define DEBUG_OVERRIDE_DISABLE_VCN_PG 0x00000020 +#define DEBUG_OVERRIDE_DISABLE_FMAX_VMAX 0x00000040 +#define DEBUG_OVERRIDE_DISABLE_IMU_FW_CHECKS 0x00000080 +#define DEBUG_OVERRIDE_DISABLE_D0i2_REENTRY_HSR_TIMER_CHECK 0x00000100 +#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200 +#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400 +#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800 +#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000 +#define DEBUG_OVERRIDE_ENABLE_SOC_VF_BRINGUP_MODE 0x00002000 +#define DEBUG_OVERRIDE_ENABLE_PER_WGP_RESIENCY 0x00004000 +#define DEBUG_OVERRIDE_DISABLE_MEMORY_VOLTAGE_SCALING 0x00008000 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +typedef enum { + SVI_PSI_0, // Full phase count (default) + SVI_PSI_1, // Phase count 1st level + SVI_PSI_2, // Phase count 2nd level + SVI_PSI_3, // Single phase operation + active diode emulation + SVI_PSI_4, // Single phase operation + passive diode emulation *optional* + SVI_PSI_5, // Reserved + SVI_PSI_6, // Power down to 0V (voltage regulation disabled) + SVI_PSI_7, // Automated phase shedding and diode emulation +} SVI_PSI_e; + +// Throttler Control/Status Bits +#define THROTTLER_TEMP_EDGE_BIT 0 +#define THROTTLER_TEMP_HOTSPOT_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_GFX_BIT 2 +#define THROTTLER_TEMP_HOTSPOT_SOC_BIT 3 +#define THROTTLER_TEMP_MEM_BIT 4 +#define THROTTLER_TEMP_VR_GFX_BIT 5 +#define THROTTLER_TEMP_VR_SOC_BIT 6 +#define THROTTLER_TEMP_VR_MEM0_BIT 7 +#define THROTTLER_TEMP_VR_MEM1_BIT 8 +#define THROTTLER_TEMP_LIQUID0_BIT 9 +#define THROTTLER_TEMP_LIQUID1_BIT 10 +#define THROTTLER_TEMP_PLX_BIT 11 +#define THROTTLER_TDC_GFX_BIT 12 +#define THROTTLER_TDC_SOC_BIT 13 +#define THROTTLER_PPT0_BIT 14 +#define THROTTLER_PPT1_BIT 15 +#define THROTTLER_PPT2_BIT 16 +#define THROTTLER_PPT3_BIT 17 +#define THROTTLER_FIT_BIT 18 +#define THROTTLER_GFX_APCC_PLUS_BIT 19 +#define THROTTLER_GFX_DVO_BIT 20 +#define THROTTLER_COUNT 21 + +// FW DState Features Control Bits +#define FW_DSTATE_SOC_ULV_BIT 0 +#define FW_DSTATE_G6_HSR_BIT 1 +#define FW_DSTATE_G6_PHY_VMEMP_OFF_BIT 2 +#define FW_DSTATE_SMN_DS_BIT 3 +#define FW_DSTATE_MP1_WHISPER_MODE_BIT 4 +#define FW_DSTATE_SOC_LIV_MIN_BIT 5 +#define FW_DSTATE_SOC_PLL_PWRDN_BIT 6 +#define FW_DSTATE_MEM_PLL_PWRDN_BIT 7 +#define FW_DSTATE_MALL_ALLOC_BIT 8 +#define FW_DSTATE_MEM_PSI_BIT 9 +#define FW_DSTATE_HSR_NON_STROBE_BIT 
10 +#define FW_DSTATE_MP0_ENTER_WFI_BIT 11 +#define FW_DSTATE_MALL_FLUSH_BIT 12 +#define FW_DSTATE_SOC_PSI_BIT 13 +#define FW_DSTATE_MMHUB_INTERLOCK_BIT 14 +#define FW_DSTATE_D0i3_2_QUIET_FW_BIT 15 +#define FW_DSTATE_CLDO_PRG_BIT 16 +#define FW_DSTATE_DF_PLL_PWRDN_BIT 17 + +//LED Display Mask & Control Bits +#define LED_DISPLAY_GFX_DPM_BIT 0 +#define LED_DISPLAY_PCIE_BIT 1 +#define LED_DISPLAY_ERROR_BIT 2 + + +#define MEM_TEMP_READ_OUT_OF_BAND_BIT 0 +#define MEM_TEMP_READ_IN_BAND_REFRESH_BIT 1 +#define MEM_TEMP_READ_IN_BAND_DUMMY_PSTATE_BIT 2 + +typedef enum { + SMARTSHIFT_VERSION_1, + SMARTSHIFT_VERSION_2, + SMARTSHIFT_VERSION_3, +} SMARTSHIFT_VERSION_e; + +typedef enum { + FOPT_CALC_AC_CALC_DC, + FOPT_PPTABLE_AC_CALC_DC, + FOPT_CALC_AC_PPTABLE_DC, + FOPT_PPTABLE_AC_PPTABLE_DC, +} FOPT_CALC_e; + +typedef enum { + DRAM_BIT_WIDTH_DISABLED = 0, + DRAM_BIT_WIDTH_X_8 = 8, + DRAM_BIT_WIDTH_X_16 = 16, + DRAM_BIT_WIDTH_X_32 = 32, + DRAM_BIT_WIDTH_X_64 = 64, + DRAM_BIT_WIDTH_X_128 = 128, + DRAM_BIT_WIDTH_COUNT, +} DRAM_BIT_WIDTH_TYPE_e; + +//I2C Interface +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VMEMP, + I2C_CONTROLLER_NAME_VR_VDDIO, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_FAN_INTAKE, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VMEMP, + I2C_CONTROLLER_THROTTLER_VR_VDDIO, + I2C_CONTROLLER_THROTTLER_LIQUID0, + I2C_CONTROLLER_THROTTLER_LIQUID1, + I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_FAN_INTAKE, + I2C_CONTROLLER_THROTTLER_INA3221, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, + I2C_CONTROLLER_PROTOCOL_INA3221, + I2C_CONTROLLER_PROTOCOL_TMP_MAX6604, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; + uint8_t PaddingConfig; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. 
Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[24]; +} EccInfoTable_t; + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +} D3HOTSequence_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + DCS_ARCH_DISABLED, + DCS_ARCH_FADCS, + DCS_ARCH_ASYNC, +} DCS_ARCH_e; + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_DCLK_0, + PPCLK_VCLK_0, + PPCLK_DISPCLK, + PPCLK_DPPCLK, + PPCLK_DPREFCLK, + PPCLK_DCFCLK, + PPCLK_DTBCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + VOLTAGE_MODE_PPTABLE = 0, + VOLTAGE_MODE_FUSES, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + AVFS_TEMP_COLD = 0, + AVFS_TEMP_HOT, + AVFS_TEMP_COUNT, +} AVFS_TEMP_e; + +typedef enum { + AVFS_D_G, + AVFS_D_COUNT, +} AVFS_D_e; + + +typedef enum { + UCLK_DIV_BY_1 = 0, + UCLK_DIV_BY_2, + UCLK_DIV_BY_4, + UCLK_DIV_BY_8, +} UCLK_DIV_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, + PWR_CONFIG_TBP_DESKTOP, + PWR_CONFIG_TBP_MOBILE, +} PwrConfig_e; + +typedef struct { + uint8_t Padding; + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t CalculateFopt; // Indication whether FW should calculate Fopt or use values below. 
Reference FOPT_CALC_e + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + uint32_t Padding3[3]; + uint16_t Padding4; + uint16_t FoptimalDc; //Foptimal frequency in DC power mode. + uint16_t FoptimalAc; //Foptimal frequency in AC power mode. + uint16_t Padding2; +} DpmDescriptor_t; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_HOTSPOT_GFX, + TEMP_HOTSPOT_SOC, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_SOC, + TEMP_VR_MEM0, + TEMP_VR_MEM1, + TEMP_LIQUID0, + TEMP_LIQUID1, + TEMP_PLX, + TEMP_COUNT, +} TEMP_e; + +typedef enum { + TDC_THROTTLER_GFX, + TDC_THROTTLER_SOC, + TDC_THROTTLER_COUNT +} TDC_THROTTLER_e; + +typedef enum { + SVI_PLANE_VDD_GFX, + SVI_PLANE_VDD_SOC, + SVI_PLANE_VDDCI_MEM, + SVI_PLANE_VDDIO_MEM, + SVI_PLANE_COUNT, +} SVI_PLANE_e; + +typedef enum { + PMFW_VOLT_PLANE_GFX, + PMFW_VOLT_PLANE_SOC, + PMFW_VOLT_PLANE_COUNT +} PMFW_VOLT_PLANE_e; + +typedef enum { + CUSTOMER_VARIANT_ROW, + CUSTOMER_VARIANT_FALCON, + CUSTOMER_VARIANT_COUNT, +} CUSTOMER_VARIANT_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { + MEM_VENDOR_PLACEHOLDER0, // 0 + MEM_VENDOR_SAMSUNG, // 1 + MEM_VENDOR_INFINEON, // 2 + MEM_VENDOR_ELPIDA, // 3 + MEM_VENDOR_ETRON, // 4 + MEM_VENDOR_NANYA, // 5 + MEM_VENDOR_HYNIX, // 6 + MEM_VENDOR_MOSEL, // 7 + MEM_VENDOR_WINBOND, // 8 + MEM_VENDOR_ESMT, // 9 + MEM_VENDOR_PLACEHOLDER1, // 10 + MEM_VENDOR_PLACEHOLDER2, // 11 + MEM_VENDOR_PLACEHOLDER3, // 12 + MEM_VENDOR_PLACEHOLDER4, // 13 + MEM_VENDOR_PLACEHOLDER5, // 14 + MEM_VENDOR_MICRON, // 15 + MEM_VENDOR_COUNT, +} MEM_VENDOR_e; + +typedef enum { + PP_GRTAVFS_HW_CPO_CTL_ZONE0, + PP_GRTAVFS_HW_CPO_CTL_ZONE1, + PP_GRTAVFS_HW_CPO_CTL_ZONE2, + PP_GRTAVFS_HW_CPO_CTL_ZONE3, + PP_GRTAVFS_HW_CPO_CTL_ZONE4, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE0, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE0, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE1, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE1, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE2, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE2, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE3, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE3, + PP_GRTAVFS_HW_CPO_EN_0_31_ZONE4, + PP_GRTAVFS_HW_CPO_EN_32_63_ZONE4, + PP_GRTAVFS_HW_ZONE0_VF, + PP_GRTAVFS_HW_ZONE1_VF1, + PP_GRTAVFS_HW_ZONE2_VF2, + PP_GRTAVFS_HW_ZONE3_VF3, + PP_GRTAVFS_HW_VOLTAGE_GB, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE0, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE1, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE2, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE3, + PP_GRTAVFS_HW_CPOSCALINGCTRL_ZONE4, + PP_GRTAVFS_HW_RESERVED_0, + PP_GRTAVFS_HW_RESERVED_1, + PP_GRTAVFS_HW_RESERVED_2, + PP_GRTAVFS_HW_RESERVED_3, + PP_GRTAVFS_HW_RESERVED_4, + PP_GRTAVFS_HW_RESERVED_5, + PP_GRTAVFS_HW_RESERVED_6, + PP_GRTAVFS_HW_FUSE_COUNT, +} PP_GRTAVFS_HW_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z1_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z2_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z3_COLD_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_HOT_T0, + PP_GRTAVFS_FW_COMMON_PPVMIN_Z4_COLD_T0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z0, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z1, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z2, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z3, + PP_GRTAVFS_FW_COMMON_SRAM_RM_Z4, + PP_GRTAVFS_FW_COMMON_FUSE_COUNT, +} PP_GRTAVFS_FW_COMMON_FUSE_e; + +typedef enum { + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_NEG_1, + 
PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB1_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_NEG_1, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_0, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_1, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_2, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_3, + PP_GRTAVFS_FW_SEP_FUSE_GB2_PWL_VOLTAGE_4, + PP_GRTAVFS_FW_SEP_FUSE_VF_NEG_1_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_VF4_FREQUENCY, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_0, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_1, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_2, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_3, + PP_GRTAVFS_FW_SEP_FUSE_FREQUENCY_TO_COUNT_SCALER_4, + PP_GRTAVFS_FW_SEP_FUSE_COUNT, +} PP_GRTAVFS_FW_SEP_FUSE_e; + +#define PP_NUM_RTAVFS_PWL_ZONES 5 + + +// VBIOS or PPLIB configures telemetry slope and offset. Only slope expected to be set for SVI3 +// Slope Q1.7, Offset Q1.2 +typedef struct { + int8_t Offset; // in Amps + uint8_t Padding; + uint16_t MaxCurrent; // in Amps +} SviTelemetryScale_t; + +#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1 + +#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0 +#define PP_OD_FEATURE_GFX_VMAX_BIT 1 +#define PP_OD_FEATURE_SOC_VMAX_BIT 2 +#define PP_OD_FEATURE_PPT_BIT 3 +#define PP_OD_FEATURE_FAN_CURVE_BIT 4 +#define PP_OD_FEATURE_FAN_LEGACY_BIT 5 +#define PP_OD_FEATURE_FULL_CTRL_BIT 6 +#define PP_OD_FEATURE_TDC_BIT 7 +#define PP_OD_FEATURE_GFXCLK_BIT 8 +#define PP_OD_FEATURE_UCLK_BIT 9 +#define PP_OD_FEATURE_FCLK_BIT 10 +#define PP_OD_FEATURE_ZERO_FAN_BIT 11 +#define PP_OD_FEATURE_TEMPERATURE_BIT 12 +#define PP_OD_FEATURE_EDC_BIT 13 +#define PP_OD_FEATURE_COUNT 14 + +typedef enum { + PP_OD_POWER_FEATURE_ALWAYS_ENABLED, + PP_OD_POWER_FEATURE_DISABLED_WHILE_GAMING, + PP_OD_POWER_FEATURE_ALWAYS_DISABLED, +} PP_OD_POWER_FEATURE_e; + +typedef enum { + FAN_MODE_AUTO = 0, + FAN_MODE_MANUAL_LINEAR, +} FanMode_e; + +typedef enum { + OD_NO_ERROR, + OD_REQUEST_ADVANCED_NOT_SUPPORTED, + OD_UNSUPPORTED_FEATURE, + OD_INVALID_FEATURE_COMBO_ERROR, + OD_GFXCLK_VF_CURVE_OFFSET_ERROR, + OD_VDD_GFX_VMAX_ERROR, + OD_VDD_SOC_VMAX_ERROR, + OD_PPT_ERROR, + OD_FAN_MIN_PWM_ERROR, + OD_FAN_ACOUSTIC_TARGET_ERROR, + OD_FAN_ACOUSTIC_LIMIT_ERROR, + OD_FAN_TARGET_TEMP_ERROR, + OD_FAN_ZERO_RPM_STOP_TEMP_ERROR, + OD_FAN_CURVE_PWM_ERROR, + OD_FAN_CURVE_TEMP_ERROR, + OD_FULL_CTRL_GFXCLK_ERROR, + OD_FULL_CTRL_UCLK_ERROR, + OD_FULL_CTRL_FCLK_ERROR, + OD_FULL_CTRL_VDD_GFX_ERROR, + OD_FULL_CTRL_VDD_SOC_ERROR, + OD_TDC_ERROR, + OD_GFXCLK_ERROR, + OD_UCLK_ERROR, + OD_FCLK_ERROR, + OD_OP_TEMP_ERROR, + OD_OP_GFX_EDC_ERROR, + OD_OP_GFX_PCC_ERROR, + OD_POWER_FEATURE_CTRL_ERROR, +} OD_FAIL_e; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Voltage control + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + + uint16_t VddGfxVmax; // in mV + uint16_t VddSocVmax; + + uint8_t IdlePwrSavingFeaturesCtrl; + uint8_t RuntimePwrSavingFeaturesCtrl; + uint16_t Padding; + + //Frequency changes + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + int16_t Tdc; + + //Fan control + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t 
AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degree Celcius + uint8_t FanZeroRpmEnable; + uint8_t FanZeroRpmStopTemp; + uint8_t FanMode; + uint8_t MaxOpTemp; + + uint8_t AdvancedOdModeEnabled; + uint8_t Padding1[3]; + + uint16_t GfxVoltageFullCtrlMode; + uint16_t SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + uint16_t Padding2; + + int16_t GfxEdc; + int16_t GfxPccLimitControl; + + uint32_t Spare[10]; + uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround +} OverDriveTable_t; + +typedef struct { + OverDriveTable_t OverDriveTable; + +} OverDriveTableExternal_t; + +typedef struct { + uint32_t FeatureCtrlMask; + + //Gfx Vf Curve + int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; + //gfx Vmax + uint16_t VddGfxVmax; // in mV + //soc Vmax + uint16_t VddSocVmax; + + //gfxclk + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz + //uclk + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + //fclk + uint16_t FclkFmin; + uint16_t FclkFmax; + + //PPT + int16_t Ppt; // % + //TDC + int16_t Tdc; + + //Fan Curve + uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + //Fan Legacy + uint16_t FanMinimumPwm; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanTargetTemperature; // Degree Celcius + //zero fan + uint8_t FanZeroRpmEnable; + //temperature + uint8_t MaxOpTemp; + uint8_t Padding[2]; + + //Full Ctrl + uint16_t GfxVoltageFullCtrlMode; + uint16_t SocVoltageFullCtrlMode; + uint16_t GfxclkFullCtrlMode; + uint16_t UclkFullCtrlMode; + uint16_t FclkFullCtrlMode; + //EDC + int16_t GfxEdc; + int16_t GfxPccLimitControl; + int16_t Padding1; + + uint32_t Spare[5]; +} OverDriveLimits_t; + +typedef enum { + BOARD_GPIO_SMUIO_0, + BOARD_GPIO_SMUIO_1, + BOARD_GPIO_SMUIO_2, + BOARD_GPIO_SMUIO_3, + BOARD_GPIO_SMUIO_4, + BOARD_GPIO_SMUIO_5, + BOARD_GPIO_SMUIO_6, + BOARD_GPIO_SMUIO_7, + BOARD_GPIO_SMUIO_8, + BOARD_GPIO_SMUIO_9, + BOARD_GPIO_SMUIO_10, + BOARD_GPIO_SMUIO_11, + BOARD_GPIO_SMUIO_12, + BOARD_GPIO_SMUIO_13, + BOARD_GPIO_SMUIO_14, + BOARD_GPIO_SMUIO_15, + BOARD_GPIO_SMUIO_16, + BOARD_GPIO_SMUIO_17, + BOARD_GPIO_SMUIO_18, + BOARD_GPIO_SMUIO_19, + BOARD_GPIO_SMUIO_20, + BOARD_GPIO_SMUIO_21, + BOARD_GPIO_SMUIO_22, + BOARD_GPIO_SMUIO_23, + BOARD_GPIO_SMUIO_24, + BOARD_GPIO_SMUIO_25, + BOARD_GPIO_SMUIO_26, + BOARD_GPIO_SMUIO_27, + BOARD_GPIO_SMUIO_28, + BOARD_GPIO_SMUIO_29, + BOARD_GPIO_SMUIO_30, + BOARD_GPIO_SMUIO_31, + MAX_BOARD_GPIO_SMUIO_NUM, + BOARD_GPIO_DC_GEN_A, + BOARD_GPIO_DC_GEN_B, + BOARD_GPIO_DC_GEN_C, + BOARD_GPIO_DC_GEN_D, + BOARD_GPIO_DC_GEN_E, + BOARD_GPIO_DC_GEN_F, + BOARD_GPIO_DC_GEN_G, + BOARD_GPIO_DC_GENLK_CLK, + BOARD_GPIO_DC_GENLK_VSYNC, + BOARD_GPIO_DC_SWAPLOCK_A, + BOARD_GPIO_DC_SWAPLOCK_B, + MAX_BOARD_DC_GPIO_NUM, + BOARD_GPIO_LV_EN, +} BOARD_GPIO_TYPE_e; + +#define INVALID_BOARD_GPIO 0xFF + + +typedef struct { + //PLL 0 + uint16_t InitImuClk; + uint16_t InitSocclk; + uint16_t InitMpioclk; + uint16_t InitSmnclk; + //PLL 1 + uint16_t InitDispClk; + uint16_t InitDppClk; + uint16_t InitDprefclk; + uint16_t InitDcfclk; + uint16_t InitDtbclk; + uint16_t InitDbguSocClk; + //PLL 2 + uint16_t InitGfxclk_bypass; + uint16_t InitMp1clk; + uint16_t InitLclk; + uint16_t InitDbguBacoClk; + uint16_t InitBaco400clk; + uint16_t InitBaco1200clk_bypass; + uint16_t InitBaco700clk_bypass; + uint16_t InitBaco500clk; + // PLL 3 + uint16_t InitDclk0; + uint16_t InitVclk0; 
+ // PLL 4 + uint16_t InitFclk; + uint16_t Padding1; + // PLL 5 + //UCLK clocks, assumed all UCLK instances will be the same. + uint8_t InitUclkLevel; // =0,1,2,3,4,5 frequency from FreqTableUclk + + uint8_t Padding[3]; + + uint32_t InitVcoFreqPll0; //smu_socclk_t + uint32_t InitVcoFreqPll1; //smu_displayclk_t + uint32_t InitVcoFreqPll2; //smu_nbioclk_t + uint32_t InitVcoFreqPll3; //smu_vcnclk_t + uint32_t InitVcoFreqPll4; //smu_fclk_t + uint32_t InitVcoFreqPll5; //smu_uclk_01_t + uint32_t InitVcoFreqPll6; //smu_uclk_23_t + uint32_t InitVcoFreqPll7; //smu_uclk_45_t + uint32_t InitVcoFreqPll8; //smu_uclk_67_t + + //encoding will be SVI3 + uint16_t InitGfx; // In mV(Q2) , should be 0? + uint16_t InitSoc; // In mV(Q2) + uint16_t InitVddIoMem; // In mV(Q2) MemVdd + uint16_t InitVddCiMem; // In mV(Q2) VMemP + + //uint16_t Padding2; + + uint32_t Spare[8]; +} BootValues_t; + +typedef struct { + uint16_t Power[PPT_THROTTLER_COUNT][POWER_SOURCE_COUNT]; // Watts + uint16_t Tdc[TDC_THROTTLER_COUNT]; // Amps + + uint16_t Temperature[TEMP_COUNT]; // Celsius + + uint8_t PwmLimitMin; + uint8_t PwmLimitMax; + uint8_t FanTargetTemperature; + uint8_t Spare1[1]; + + uint16_t AcousticTargetRpmThresholdMin; + uint16_t AcousticTargetRpmThresholdMax; + + uint16_t AcousticLimitRpmThresholdMin; + uint16_t AcousticLimitRpmThresholdMax; + + uint16_t PccLimitMin; + uint16_t PccLimitMax; + + uint16_t FanStopTempMin; + uint16_t FanStopTempMax; + uint16_t FanStartTempMin; + uint16_t FanStartTempMax; + + uint16_t PowerMinPpt0[POWER_SOURCE_COUNT]; + uint32_t Spare[11]; +} MsgLimits_t; + +typedef struct { + uint16_t BaseClockAc; + uint16_t GameClockAc; + uint16_t BoostClockAc; + uint16_t BaseClockDc; + uint16_t GameClockDc; + uint16_t BoostClockDc; + + uint32_t Reserved[4]; +} DriverReportedClocks_t; + +typedef struct { + uint8_t DcBtcEnabled; + uint8_t Padding[3]; + + uint16_t DcTol; // mV Q2 + uint16_t DcBtcGb; // mV Q2 + + uint16_t DcBtcMin; // mV Q2 + uint16_t DcBtcMax; // mV Q2 + + LinearInt_t DcBtcGbScalar; +} AvfsDcBtcParams_t; + +typedef struct { + uint16_t AvfsTemp[AVFS_TEMP_COUNT]; //in degrees C + uint16_t VftFMin; // in MHz + uint16_t VInversion; // in mV Q2 + QuadraticInt_t qVft[AVFS_TEMP_COUNT]; + QuadraticInt_t qAvfsGb; + QuadraticInt_t qAvfsGb2; +} AvfsFuseOverride_t; + +//all settings maintained by PFE team +typedef struct { + uint8_t Version; + uint8_t Spare8[3]; + // SECTION: Feature Control + uint32_t FeaturesToRun[NUM_FEATURES / 32]; // Features that PMFW will attempt to enable. Use FEATURE_*_BIT as mapping + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW_DSTATE_*_BIT for mapping + // SECTION: Advanced Options + uint32_t DebugOverrides; + + uint32_t Spare[2]; +} PFE_Settings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; // should be unique to each SKU(i.e if any value changes in below structure then this value must be different) + + // SECTION: Miscellaneous Configuration + uint8_t TotalPowerConfig; // Determines how PMFW calculates the power. Use defines from PwrConfig_e + uint8_t CustomerVariant; //To specify if this PPTable is intended for a particular customer. Use defines from CUSTOMER_VARIANT_e + uint8_t MemoryTemperatureTypeMask; // Bit mapping indicating which methods of memory temperature reading are enabled. 
Use defines from MEM_TEMP_*BIT + uint8_t SmartShiftVersion; // Determine what SmartShift feature version is supported Use defines from SMARTSHIFT_VERSION_e + + // SECTION: Infrastructure Limits + uint8_t SocketPowerLimitSpare[10]; + + //if set to 1, SocketPowerLimitAc and SocketPowerLimitDc will be interpreted as legacy programs(i.e absolute power). If 0, all except index 0 will be scalars + //relative index 0 + uint8_t EnableLegacyPptLimit; + uint8_t UseInputTelemetry; //applicable to SVI3 only and only to be set if VRs support + + uint8_t SmartShiftMinReportedPptinDcs; //minimum possible active power consumption for this SKU. Used for SmartShift power reporting + + uint8_t PaddingPpt[7]; + + uint16_t HwCtfTempLimit; // In degrees Celsius. Temperature above which HW will trigger CTF. Consumed by VBIOS only + + uint16_t PaddingInfra; + + // Per year normalized Vmax state failure rates (sum of the two domains divided by life time in years) + uint32_t FitControllerFailureRateLimit; //in IEEE float + //Expected GFX Duty Cycle at Vmax. + uint32_t FitControllerGfxDutyCycle; // in IEEE float + //Expected SOC Duty Cycle at Vmax. + uint32_t FitControllerSocDutyCycle; // in IEEE float + + //This offset will be deducted from the controller output to before it goes through the SOC Vset limiter block. + uint32_t FitControllerSocOffset; //in IEEE float + + uint32_t GfxApccPlusResidencyLimit; // Percentage value. Used by APCC+ controller to control PCC residency to some value + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See THROTTLER_*_BIT for mapping + + + // SECTION: Voltage Control Parameters + uint16_t UlvVoltageOffset[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2). ULV offset used in either GFX_ULV or SOC_ULV(part of FW_DSTATE) + + uint8_t Padding[2]; + uint16_t DeepUlvVoltageOffsetSoc; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC as part of FW_DSTATE + + // Voltage Limits + uint16_t DefaultMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage without FIT controller enabled + uint16_t BoostMaxVoltage[PMFW_VOLT_PLANE_COUNT]; // In mV(Q2) Maximum voltage with FIT controller enabled + + //Vmin Optimizations + int16_t VminTempHystersis[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature hysteresis for switching between low/high temperature values for Vmin + int16_t VminTempThreshold[PMFW_VOLT_PLANE_COUNT]; // Celsius Temperature threshold for switching between low/high temperature values for Vmin + uint16_t Vmin_Hot_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at hot. + uint16_t Vmin_Cold_T0[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Initial (pre-aging) Vset to be used at cold. + uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot. + uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold. + uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin + uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot + uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold + + //This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for. + uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT]; + //Linear offset or GB term to account for mis-correlation between PSM and Vmin shift trends across parts. 
+ uint16_t VcBtcVmin2PsmDegrationGb[PMFW_VOLT_PLANE_COUNT]; + //Scalar coefficient of the PSM aging degradation function + uint32_t VcBtcPsmA[PMFW_VOLT_PLANE_COUNT]; // A_PSM + //Exponential coefficient of the PSM aging degradation function + uint32_t VcBtcPsmB[PMFW_VOLT_PLANE_COUNT]; // B_PSM + //Scalar coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. + uint32_t VcBtcVminA[PMFW_VOLT_PLANE_COUNT]; // A_VMIN + //Exponential coefficient of the VMIN aging degradation function. Specified as worst case between hot and cold. + uint32_t VcBtcVminB[PMFW_VOLT_PLANE_COUNT]; // B_VMIN + + uint8_t PerPartVminEnabled[PMFW_VOLT_PLANE_COUNT]; + uint8_t VcBtcEnabled[PMFW_VOLT_PLANE_COUNT]; + + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + + QuadraticInt_t Gfx_Vmin_droop; + QuadraticInt_t Soc_Vmin_droop; + uint32_t SpareVmin[6]; + + //SECTION: DPM Configuration 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableShadowUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t GfxclkAibFmax; + uint16_t GfxclkFreqCap; + + //GFX Idle Power Settings + uint16_t GfxclkFgfxoffEntry; // Entry in RLC stage (PLL), in Mhz + uint16_t GfxclkFgfxoffExitImu; // Exit/Entry in IMU stage (BYPASS), in Mhz + uint16_t GfxclkFgfxoffExitRlc; // Exit in RLC stage (PLL), in Mhz + uint16_t GfxclkThrottleClock; //Used primarily in DCS + uint8_t EnableGfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages + uint8_t GfxIdlePadding; + + uint8_t SmsRepairWRCKClkDivEn; + uint8_t SmsRepairWRCKClkDivVal; + uint8_t GfxOffEntryEarlyMGCGEn; + uint8_t GfxOffEntryForceCGCGEn; + uint8_t GfxOffEntryForceCGCGDelayEn; + uint8_t GfxOffEntryForceCGCGDelayVal; // in microseconds + + uint16_t GfxclkFreqGfxUlv; // in MHz + uint8_t GfxIdlePadding2[2]; + uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry + uint32_t GfxoffSpare[15]; + + // DFLL + uint16_t DfllMstrOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint16_t DfllSlvOscConfigA; //Used for voltage sensitivity slope tuning: 0 = (en_leaker << 9) | (en_vint1_reduce << 8) | (gain_code << 6) | (bias_code << 3) | (vint1_code << 1) | en_bias + uint32_t DfllBtcMasterScalerM; + int32_t DfllBtcMasterScalerB; + uint32_t DfllBtcSlaveScalerM; + int32_t DfllBtcSlaveScalerB; + + uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg + uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg + uint32_t GfxDfllSpare[9]; + + // DVO + uint32_t 
DvoPsmDownThresholdVoltage; //Voltage float + uint32_t DvoPsmUpThresholdVoltage; //Voltage float + uint32_t DvoFmaxLowScaler; //Unitless float + + // GFX DCS + uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase + uint16_t PaddingDcs; + + uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase + uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. + + uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. + + uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. + uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. + + uint32_t DcsPfGfxFopt; //Default to GFX FMIN + uint32_t DcsPfUclkFopt; //Default to UCLK FMIN + + uint8_t FoptEnabled; + uint8_t DcsSpare2[3]; + uint32_t DcsFoptM; //Tuning parameters to shift Fopt calculation, IEEE754 float + uint32_t DcsFoptB; //Tuning parameters to shift Fopt calculation, IEEE754 float + uint32_t DcsSpare[9]; + + // UCLK section + uint8_t UseStrobeModeOptimizations; //Set to indicate that FW should use strobe mode optimizations + uint8_t PaddingMem[3]; + + uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 6 Primary SW DPM states (6 + 6 Shadow) + uint8_t UclkDpmShadowPstates [NUM_UCLK_DPM_LEVELS]; // 6 Shadow SW DPM states (6 + 6 Shadow) + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + uint8_t FreqTableShadowUclkDiv [NUM_UCLK_DPM_LEVELS]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + uint16_t MemVmempVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddioVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t DalDcModeMaxUclkFreq; + uint8_t PaddingsMem[2]; + //FCLK Section + uint16_t FclkDpmDisallowPstateFreq; //Frequency which FW will target when indicated that display config cannot support P-state.
Set to 0 use FW calculated value + uint16_t PaddingFclk; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 4:PciE-gen5 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // SECTION: VDD_GFX AVFS + uint8_t OverrideGfxAvfsFuses; + uint8_t GfxAvfsPadding[3]; + + uint32_t SocHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //new added for Soc domain + uint32_t GfxL2HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; //see fusedoc for encoding + //uint32_t GfxSeHwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + uint32_t spare_HwRtAvfsFuses[PP_GRTAVFS_HW_FUSE_COUNT]; + + uint32_t SocCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; + uint32_t GfxCommonRtAvfs[PP_GRTAVFS_FW_COMMON_FUSE_COUNT]; + + uint32_t SocFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + uint32_t GfxL2FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + //uint32_t GfxSeFwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + uint32_t spare_FwRtAvfsFuses[PP_GRTAVFS_FW_SEP_FUSE_COUNT]; + + uint32_t Soc_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t Gfx_Droop_PWL_F[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_a[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_b[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Gfx_Droop_PWL_c[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t Gfx_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + uint32_t Soc_Static_PWL_Offset[PP_NUM_RTAVFS_PWL_ZONES]; + + uint32_t dGbV_dT_vmin; + uint32_t dGbV_dT_vmax; + + //Unused: PMFW-9370 + uint32_t V2F_vmin_range_low; + uint32_t V2F_vmin_range_high; + uint32_t V2F_vmax_range_low; + uint32_t V2F_vmax_range_high; + + AvfsDcBtcParams_t DcBtcGfxParams; + QuadraticInt_t SSCurve_GFX; + uint32_t GfxAvfsSpare[29]; + + //SECTION: VDD_SOC AVFS + uint8_t OverrideSocAvfsFuses; + uint8_t MinSocAvfsRevision; + uint8_t SocAvfsPadding[2]; + + AvfsFuseOverride_t SocAvfsFuseOverride[AVFS_D_COUNT]; + + DroopInt_t dBtcGbSoc[AVFS_D_COUNT]; // GHz->V BtcGb + + LinearInt_t qAgingGb[AVFS_D_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_D_COUNT]; // GHz->V + + AvfsDcBtcParams_t DcBtcSocParams[AVFS_D_COUNT]; + + QuadraticInt_t SSCurve_SOC; + uint32_t SocAvfsSpare[29]; + + //SECTION: Boot clock and voltage values + BootValues_t BootValues; + + //SECTION: Driver Reported Clocks + DriverReportedClocks_t DriverReportedClocks; + + //SECTION: Message Limits + MsgLimits_t MsgLimits; + + //SECTION: OverDrive Limits + OverDriveLimits_t OverDriveLimitsBasicMin; + OverDriveLimits_t OverDriveLimitsBasicMax; + OverDriveLimits_t OverDriveLimitsAdvancedMin; + OverDriveLimits_t OverDriveLimitsAdvancedMax; + + // Section: Total Board Power idle vs active coefficients + uint8_t TotalBoardPowerSupport; + uint8_t TotalBoardPowerPadding[1]; + uint16_t TotalBoardPowerRoc; + + //PMFW-11158 + QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT]; + + // APT GFX to UCLK mapping + int32_t AptUclkGfxclkLookup[POWER_SOURCE_COUNT][6]; + uint32_t AptUclkGfxclkLookupHyst[POWER_SOURCE_COUNT][6]; + uint32_t AptPadding; + + // Xvmin didt + QuadraticInt_t GfxXvminDidtDroopThresh; + uint32_t GfxXvminDidtResetDDWait; + uint32_t GfxXvminDidtClkStopWait; + uint32_t GfxXvminDidtFcsStepCtrl; + uint32_t GfxXvminDidtFcsWaitCtrl; + + // PSM based didt controller 
+ uint32_t PsmModeEnabled; //0: all disabled 1: static mode only 2: dynamic mode only 3:static + dynamic mode + uint32_t P2v_a; // floating point in U32 format + uint32_t P2v_b; + uint32_t P2v_c; + uint32_t T2p_a; + uint32_t T2p_b; + uint32_t T2p_c; + uint32_t P2vTemp; + QuadraticInt_t PsmDidtStaticSettings; + QuadraticInt_t PsmDidtDynamicSettings; + uint8_t PsmDidtAvgDiv; + uint8_t PsmDidtForceStall; + uint16_t PsmDidtReleaseTimer; + uint32_t PsmDidtStallPattern; //Will be written to both pattern 1 and didt_static_level_prog + // CAC EDC + uint32_t Leakage_C0; // in IEEE float + uint32_t Leakage_C1; // in IEEE float + uint32_t Leakage_C2; // in IEEE float + uint32_t Leakage_C3; // in IEEE float + uint32_t Leakage_C4; // in IEEE float + uint32_t Leakage_C5; // in IEEE float + uint32_t GFX_CLK_SCALAR; // in IEEE float + uint32_t GFX_CLK_INTERCEPT; // in IEEE float + uint32_t GFX_CAC_M; // in IEEE float + uint32_t GFX_CAC_B; // in IEEE float + uint32_t VDD_GFX_CurrentLimitGuardband; // in IEEE float + uint32_t DynToTotalCacScalar; // in IEEE + // GFX EDC XVMIN + uint32_t XVmin_Gfx_EdcThreshScalar; + uint32_t XVmin_Gfx_EdcEnableFreq; + uint32_t XVmin_Gfx_EdcPccAsStepCtrl; + uint32_t XVmin_Gfx_EdcPccAsWaitCtrl; + uint16_t XVmin_Gfx_EdcThreshold; + uint16_t XVmin_Gfx_EdcFiltHysWaitCtrl; + // SOC EDC XVMIN + uint32_t XVmin_Soc_EdcThreshScalar; + uint32_t XVmin_Soc_EdcEnableFreq; + uint32_t XVmin_Soc_EdcThreshold; // LPF: number of cycles Xvmin_trig_filt will react. + uint16_t XVmin_Soc_EdcStepUpTime; // 10 bit, refclk count to step up throttle when PCC remains asserted. + uint16_t XVmin_Soc_EdcStepDownTime;// 10 bit, refclk count to step down throttle when PCC remains asserted. + uint8_t XVmin_Soc_EdcInitPccStep; // 3 bit, First Pcc Step number that will be applied when PCC asserts.
+ uint8_t PaddingSocEdc[3]; + + // Fuse Override for SOC and GFX XVMIN + uint8_t GfxXvminFuseOverride; + uint8_t SocXvminFuseOverride; + uint8_t PaddingXvminFuseOverride[2]; + uint8_t GfxXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t GfxXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempLow; // bit 7: sign, bit 0-6: ABS value + uint8_t SocXvminFddTempHigh; // bit 7: sign, bit 0-6: ABS value + + + uint16_t GfxXvminFddVolt0; // low voltage, in VID + uint16_t GfxXvminFddVolt1; // mid voltage, in VID + uint16_t GfxXvminFddVolt2; // high voltage, in VID + uint16_t SocXvminFddVolt0; // low voltage, in VID + uint16_t SocXvminFddVolt1; // mid voltage, in VID + uint16_t SocXvminFddVolt2; // high voltage, in VID + uint16_t GfxXvminDsFddDsm[6]; // XVMIN DS, same organization with fuse + uint16_t GfxXvminEdcFddDsm[6];// XVMIN GFX EDC, same organization with fuse + uint16_t SocXvminEdcFddDsm[6];// XVMIN SOC EDC, same organization with fuse + + // SECTION: Sku Reserved + uint32_t Spare; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} SkuTable_t; + +typedef struct { + uint8_t SlewRateConditions; + uint8_t LoadLineAdjust; + uint8_t VoutOffset; + uint8_t VidMax; + uint8_t VidMin; + uint8_t TenBitTelEn; + uint8_t SixteenBitTelEn; + uint8_t OcpThresh; + uint8_t OcpWarnThresh; + uint8_t OcpSettings; + uint8_t VrhotThresh; + uint8_t OtpThresh; + uint8_t UvpOvpDeltaRef; + uint8_t PhaseShed; + uint8_t Padding[10]; + uint32_t SettingOverrideMask; +} Svi3RegulatorSettings_t; + +typedef struct { + // SECTION: Version + uint32_t Version; //should be unique to each board type + + // SECTION: I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + //SECTION SVI3 Board Parameters + uint8_t SlaveAddrMapping[SVI_PLANE_COUNT]; + uint8_t VrPsiSupport[SVI_PLANE_COUNT]; + + uint32_t Svi3SvcSpeed; + uint8_t EnablePsi6[SVI_PLANE_COUNT]; // only applicable in SVI3 + + // SECTION: Voltage Regulator Settings + Svi3RegulatorSettings_t Svi3RegSettings[SVI_PLANE_COUNT]; + + // SECTION: GPIO Settings + uint8_t LedOffGpio; + uint8_t FanOffGpio; + uint8_t GfxVrPowerStageOffGpio; + + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t LedEnableMask; + + uint8_t LedPcie; // GPIO number for PCIE results + uint8_t LedError; // GPIO number for Error Cases + uint8_t PaddingLed; + + // SECTION: Clock Spread Spectrum + + // UCLK Spread Spectrum + uint8_t UclkTrainingModeSpreadPercent; // Q4.4 + uint8_t UclkSpreadPadding; + uint16_t UclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadPercent[MEM_VENDOR_COUNT]; + + // DFLL Spread Spectrum + uint8_t GfxclkSpreadEnable; + + // FCLK Spread Spectrum + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // Section: Memory Config + uint8_t DramWidth; // Width of interface to the channel for each DRAM module. 
See DRAM_BIT_WIDTH_TYPE_e + uint8_t PaddingMem1[7]; + + // SECTION: UMC feature flags + uint8_t HsrEnabled; + uint8_t VddqOffEnabled; + uint8_t PaddingUmcFlags[2]; + + uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued + uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS + + uint8_t FuseWritePowerMuxPresent; + uint8_t FuseWritePadding[3]; + + // SECTION: EDC Params + uint32_t LoadlineGfx; + uint32_t LoadlineSoc; + uint32_t GfxEdcLimit; + uint32_t SocEdcLimit; + + uint32_t RestBoardPower; //power consumed by board that is not captured by the SVI3 input telemetry + uint32_t ConnectorsImpedance; // impedance of the input ATX power connectors + + uint8_t EpcsSens0; //GPIO number for External Power Connector Support Sense0 + uint8_t EpcsSens1; //GPIO Number for External Power Connector Support Sense1 + uint8_t PaddingEpcs[2]; + + // SECTION: Board Reserved + uint32_t BoardSpare[52]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} BoardTable_t; + +typedef struct { + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in AC mode. Multiple limits supported + + uint16_t VrTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. Current limit associated with VR regulator maximum temperature + + int16_t TotalIdleBoardPowerM; + int16_t TotalIdleBoardPowerB; + int16_t TotalBoardPowerM; + int16_t TotalBoardPowerB; + + uint16_t TemperatureLimit[TEMP_COUNT]; // In degrees Celsius. Temperature limit associated with each input + + // SECTION: Fan Control + uint16_t FanStopTemp[TEMP_COUNT]; //Celsius + uint16_t FanStartTemp[TEMP_COUNT]; //Celsius + + uint16_t FanGain[TEMP_COUNT]; + + uint16_t FanPwmMin; + uint16_t AcousticTargetRpmThreshold; + uint16_t AcousticLimitRpmThreshold; + uint16_t FanMaximumRpm; + uint16_t MGpuAcousticLimitRpmThreshold; + uint16_t FanTargetGfxclk; + uint32_t TempInputSelectMask; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + uint16_t FanPadding; + uint16_t FanTargetTemperature[TEMP_COUNT]; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + uint16_t FwCtfLimit[TEMP_COUNT]; + + uint16_t IntakeTempEnableRPM; + int16_t IntakeTempOffsetTemp; + uint16_t IntakeTempReleaseTemp; + uint16_t IntakeTempHighIntakeAcousticLimit; + + uint16_t IntakeTempAcouticLimitReleaseRate; + int16_t FanAbnormalTempLimitOffset; // FanStalledTempLimitOffset + uint16_t FanStalledTriggerRpm; // + uint16_t FanAbnormalTriggerRpmCoeff; // FanAbnormalTriggerRpm + + uint16_t FanSpare[1]; + uint8_t FanIntakeSensorSupport; + uint8_t FanIntakePadding; + uint32_t FanAmbientPerfBoostThreshold; + uint32_t FanSpare2[12]; + + uint16_t TemperatureLimit_Hynix; // In degrees Celsius. Memory temperature limit associated with Hynix + uint16_t TemperatureLimit_Micron; // In degrees Celsius. Memory temperature limit associated with Micron + uint16_t TemperatureFwCtfLimit_Hynix; + uint16_t TemperatureFwCtfLimit_Micron; + + // SECTION: Board Reserved + uint16_t PlatformTdcLimit[TDC_THROTTLER_COUNT]; // In Amperes. 
Current limit associated with platform maximum temperature per VR current rail + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // In Watts. Power limit that PMFW attempts to control to in DC mode. Multiple limits supported + uint16_t SocketPowerLimitSmartShift2; // In Watts. Power limit used SmartShift + uint16_t CustomSkuSpare16b; + uint32_t CustomSkuSpare32b[10]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; +} CustomSkuTable_t; + +typedef struct { + PFE_Settings_t PFE_Settings; + SkuTable_t SkuTable; + CustomSkuTable_t CustomSkuTable; + BoardTable_t BoardTable; +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t FclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t UclkMaxActivityLpfTau; + uint16_t SocketPowerLpfTau; + uint16_t VcnClkAverageLpfTau; + uint16_t VcnUsageAverageLpfTau; + uint16_t PcieActivityLpTau; +} DriverSmuConfig_t; + +typedef struct { + DriverSmuConfig_t DriverSmuConfig; + + uint32_t Spare[8]; + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfigExternal_t; + + +typedef struct { + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDppClk [NUM_DPPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDprefclk [NUM_DPREFCLK_DPM_LEVELS]; // In MHz + uint16_t FreqTableDcfclk [NUM_DCFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint16_t Padding; + + uint32_t Spare[32]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use + +} DriverInfoTable_t; + +typedef struct { + uint32_t CurrClock[PPCLK_COUNT]; + + uint16_t AverageGfxclkFrequencyTarget; + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageFclkFrequencyPreDs; + uint16_t AverageFclkFrequencyPostDs; + uint16_t AverageMemclkFrequencyPreDs ; // this is scaled to actual memory clock + uint16_t AverageMemclkFrequencyPostDs ; // this is scaled to actual memory clock + uint16_t AverageVclk0Frequency ; + uint16_t AverageDclk0Frequency ; + uint16_t AverageVclk1Frequency ; + uint16_t AverageDclk1Frequency ; + uint16_t PCIeBusy ; + uint16_t dGPU_W_MAX ; + uint16_t padding ; + + uint32_t MetricsCounter ; + + uint16_t AvgVoltage[SVI_PLANE_COUNT]; + uint16_t AvgCurrent[SVI_PLANE_COUNT]; + + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint16_t Vcn0ActivityPercentage ; + uint16_t Vcn1ActivityPercentage ; + + uint32_t EnergyAccumulator; + uint16_t AverageSocketPower; + uint16_t AverageTotalBoardPower; + + uint16_t AvgTemperature[TEMP_COUNT]; + uint16_t AvgTemperatureFanIntake; + + uint8_t PcieRate ; + uint8_t PcieWidth ; + + uint8_t AvgFanPwm; + uint8_t Padding[1]; + uint16_t AvgFanRpm; + + + uint8_t ThrottlingPercentage[THROTTLER_COUNT]; + uint8_t padding1[3]; + + //metrics for D3hot entry/exit and driver ARM msgs + uint32_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint32_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; + 
uint32_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; + + uint16_t ApuSTAPMSmartShiftLimit; + uint16_t ApuSTAPMLimit; + uint16_t AvgApuSocketPower; + + uint16_t AverageUclkActivity_MAX; + + uint32_t PublicSerialNumberLower; + uint32_t PublicSerialNumberUpper; + +} SmuMetrics_t; + +typedef struct { + SmuMetrics_t SmuMetrics; + uint32_t Spare[30]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetricsExternal_t; + +typedef struct { + uint8_t WmSetting; + uint8_t Flags; + uint8_t Padding[2]; + +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WATERMARKS_CLOCK_RANGE = 0, + WATERMARKS_DUMMY_PSTATE, + WATERMARKS_MALL, + WATERMARKS_COUNT, +} WATERMARKS_FLAGS_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES]; +} Watermarks_t; + +typedef struct { + Watermarks_t Watermarks; + uint32_t Spare[16]; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarksExternal_t; + +typedef struct { + uint16_t avgPsmCount[76]; + uint16_t minPsmCount[76]; + uint16_t maxPsmCount[76]; + float avgPsmVoltage[76]; + float minPsmVoltage[76]; + float maxPsmVoltage[76]; +} AvfsDebugTable_t; + +typedef struct { + AvfsDebugTable_t AvfsDebugTable; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTableExternal_t; + + +typedef struct { + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t PaddingGfx; + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Fclk_ActiveHystLimit; + uint8_t Fclk_IdleHystLimit; + uint8_t Fclk_FPS; + uint8_t Fclk_MinActiveFreqType; + uint8_t Fclk_BoosterFreqType; + uint8_t PaddingFclk; + uint16_t Fclk_MinActiveFreq; // MHz + uint16_t Fclk_BoosterFreq; // MHz + uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Fclk_PD_Data_limit_a; // Q16 + uint32_t Fclk_PD_Data_limit_b; // Q16 + uint32_t Fclk_PD_Data_limit_c; // Q16 + uint32_t Fclk_PD_Data_error_coeff; // Q16 + uint32_t Fclk_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit[NUM_UCLK_DPM_LEVELS]; // Q16 + uint8_t Mem_UpHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_DownHystLimit[NUM_UCLK_DPM_LEVELS]; + uint16_t Mem_Fps; + +} DpmActivityMonitorCoeffInt_t; + + +typedef struct { + DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt; + uint32_t MmHubPadding[8]; // SMU internal use +} DpmActivityMonitorCoeffIntExternal_t; + + + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_WINDOW_3D_BIT 7 +#define WORKLOAD_PPLIB_DIRECT_ML_BIT 8 +#define WORKLOAD_PPLIB_CGVDI_BIT 9 +#define WORKLOAD_PPLIB_COUNT 10 + + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF +#define TABLE_TRANSFER_PENDING 0xAB + +// Table types +#define TABLE_PPTABLE 0 +#define 
TABLE_COMBO_PPTABLE 1 +#define TABLE_WATERMARKS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +#define TABLE_ACTIVITY_MONITOR_COEFF 7 +#define TABLE_OVERDRIVE 8 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_DRIVER_INFO 10 +#define TABLE_ECCINFO 11 +#define TABLE_CUSTOM_SKUTABLE 12 +#define TABLE_COUNT 13 + +//IH Interrupt ID +#define IH_INTERRUPT_ID_TO_DRIVER 0xFE +#define IH_INTERRUPT_CONTEXT_ID_BACO 0x2 +#define IH_INTERRUPT_CONTEXT_ID_AC 0x3 +#define IH_INTERRUPT_CONTEXT_ID_DC 0x4 +#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5 +#define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6 +#define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 +#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 +#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 + +#endif -- cgit v1.2.3-70-g09d2 From 457ff2952b0b78994b25de73243ccd441e556a90 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Wed, 26 Apr 2023 14:13:53 +0800 Subject: drm/amd/swsmu: add smu v14_0_2 ppsmc file Add initial smu v14_0_2 ppsmc file v2: Squash in updates (Alex) v3: Squash in updates (Alex) Signed-off-by: Kenneth Feng Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- .../amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h | 140 +++++++++++++++++++++ 1 file changed, 140 insertions(+) create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h new file mode 100644 index 000000000000..de2e442281ff --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h @@ -0,0 +1,140 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + +#ifndef SMU_V14_0_2_PPSMC_H +#define SMU_V14_0_2_PPSMC_H + +#define PPSMC_VERSION 0x1 + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC +#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x15 +#define PPSMC_MSG_ExitBaco 0x16 +#define PPSMC_MSG_ArmD3 0x17 +#define PPSMC_MSG_BacoAudioD3PME 0x18 + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x19 +#define PPSMC_MSG_SetSoftMaxByFreq 0x1A +#define PPSMC_MSG_SetHardMinByFreq 0x1B +#define PPSMC_MSG_SetHardMaxByFreq 0x1C +#define PPSMC_MSG_GetMinDpmFreq 0x1D +#define PPSMC_MSG_GetMaxDpmFreq 0x1E +#define PPSMC_MSG_GetDpmFreqByIndex 0x1F +#define PPSMC_MSG_OverridePcieParameters 0x20 + +//DramLog Set DramAddr +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x22 +#define PPSMC_MSG_DramLogSetDramSize 0x23 +#define PPSMC_MSG_SetWorkloadMask 0x24 + +#define PPSMC_MSG_GetVoltageByDpm 0x25 // Can be removed +#define PPSMC_MSG_SetVideoFps 0x26 // Can be removed +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27 + +//Power Gating +#define PPSMC_MSG_AllowGfxOff 0x28 +#define PPSMC_MSG_DisallowGfxOff 0x29 +#define PPSMC_MSG_PowerUpVcn 0x2A +#define PPSMC_MSG_PowerDownVcn 0x2B +#define PPSMC_MSG_PowerUpJpeg 0x2C +#define PPSMC_MSG_PowerDownJpeg 0x2D + +//Resets +#define PPSMC_MSG_PrepareMp1ForUnload 0x2E +#define PPSMC_MSG_Mode1Reset 0x2F + +//Set SystemVirtual DramAddrHigh +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x31 +//ACDC Power Source +#define PPSMC_MSG_SetPptLimit 0x32 +#define PPSMC_MSG_GetPptLimit 0x33 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x34 +#define PPSMC_MSG_NotifyPowerSource 0x35 + +//BTC +#define PPSMC_MSG_RunDcBtc 0x36 + +// 0x37 + +//Others +#define PPSMC_MSG_SetTemperatureInputSelect 0x38 // Can be removed +#define PPSMC_MSG_SetFwDstatesMask 0x39 +#define PPSMC_MSG_SetThrottlerMask 0x3A + +#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x3B + +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x3C + +//STB to dram log +#define PPSMC_MSG_DumpSTBtoDram 0x3D +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3E +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3F +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x40 +#define PPSMC_MSG_SetOBMTraceBufferLogging 0x41 + +#define PPSMC_MSG_AllowGfxDcs 0x43 +#define PPSMC_MSG_DisallowGfxDcs 0x44 +#define PPSMC_MSG_EnableAudioStutterWA 0x45 +#define PPSMC_MSG_PowerUpUmsch 0x46 +#define PPSMC_MSG_PowerDownUmsch 0x47 +#define 
PPSMC_MSG_SetDcsArch 0x48 +#define PPSMC_MSG_TriggerVFFLR 0x49 +#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x4A +#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4B +#define PPSMC_MSG_SetPriorityDeltaGain 0x4C +#define PPSMC_MSG_AllowIHHostInterrupt 0x4D +#define PPSMC_MSG_Mode3Reset 0x4F +#define PPSMC_Message_Count 0x50 +#endif -- cgit v1.2.3-70-g09d2 From fefa83fe43ca3c365ea2cdc3f6bf8bddd4e3519d Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Mon, 5 Feb 2024 17:31:35 -0500 Subject: drm/amd/swsmu: add pptable header for smu v14_0_2 Add pptable header for smu v14_0_2. Signed-off-by: Likun Gao Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- .../gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h | 164 +++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h new file mode 100644 index 000000000000..4a3fde89aed7 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h @@ -0,0 +1,164 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_14_0_2_PPTABLE_H +#define SMU_14_0_2_PPTABLE_H + + +#pragma pack(push, 1) + +#define SMU_14_0_2_TABLE_FORMAT_REVISION 3 + +// POWERPLAYTABLE::ulPlatformCaps +#define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC needs to show Powerplay page. +#define SMU_14_0_2_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notification is done by SBIOS instead of OS. +#define SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notification is done by GPIO pin directly. +#define SMU_14_0_2_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry. +#define SMU_14_0_2_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry. +#define SMU_14_0_2_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate. +#define SMU_14_0_2_PP_PLATFORM_CAP_LEDSUPPORTED 0x40 // This cap indicates whether board supports the LED. +#define SMU_14_0_2_PP_PLATFORM_CAP_MOBILEOVERDRIVE 0x80 // This cap indicates whether board supports the Mobile Overdrive.
+ +// SMU_14_0_2_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0 + +#define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD +#define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 + +enum SMU_14_0_2_OD_SW_FEATURE_CAP +{ + SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODCAP_POWER_MODE = 1, + SMU_14_0_2_ODCAP_AUTO_UV_ENGINE = 2, + SMU_14_0_2_ODCAP_AUTO_OC_ENGINE = 3, + SMU_14_0_2_ODCAP_AUTO_OC_MEMORY = 4, + SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE = 5, + SMU_14_0_2_ODCAP_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODCAP_AUTO_SOC_UV = 8, + SMU_14_0_2_ODCAP_COUNT = 9, +}; + +enum SMU_14_0_2_OD_SW_FEATURE_ID +{ + SMU_14_0_2_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_14_0_2_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, // Auto Fan Acoustic RPM + SMU_14_0_2_ODFEATURE_POWER_MODE = 1 << SMU_14_0_2_ODCAP_POWER_MODE, // Optimized GPU Power Mode + SMU_14_0_2_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_UV_ENGINE, // Auto Under Volt GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_14_0_2_ODCAP_AUTO_OC_ENGINE, // Auto Over Clock GFXCLK + SMU_14_0_2_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_14_0_2_ODCAP_AUTO_OC_MEMORY, // Auto Over Clock MCLK + SMU_14_0_2_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_14_0_2_ODCAP_MEMORY_TIMING_TUNE, // Auto AC Timing Tuning + SMU_14_0_2_ODFEATURE_MANUAL_AC_TIMING = 1 << SMU_14_0_2_ODCAP_MANUAL_AC_TIMING, // Manual fine grain AC Timing tuning + SMU_14_0_2_ODFEATURE_AUTO_VF_CURVE_OPTIMIZER = 1 << SMU_14_0_2_ODCAP_AUTO_VF_CURVE_OPTIMIZER, // Fine grain auto VF curve tuning + SMU_14_0_2_ODFEATURE_AUTO_SOC_UV = 1 << SMU_14_0_2_ODCAP_AUTO_SOC_UV, // Auto Unver Volt VDDSOC +}; + +#define SMU_14_0_2_MAX_ODFEATURE 32 // Maximum Number of OD Features + +enum SMU_14_0_2_OD_SW_FEATURE_SETTING_ID +{ + SMU_14_0_2_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT = 0, + SMU_14_0_2_ODSETTING_POWER_MODE = 1, + SMU_14_0_2_ODSETTING_AUTOUVENGINE = 2, + SMU_14_0_2_ODSETTING_AUTOOCENGINE = 3, + SMU_14_0_2_ODSETTING_AUTOOCMEMORY = 4, + SMU_14_0_2_ODSETTING_ACTIMING = 5, + SMU_14_0_2_ODSETTING_MANUAL_AC_TIMING = 6, + SMU_14_0_2_ODSETTING_AUTO_VF_CURVE_OPTIMIZER = 7, + SMU_14_0_2_ODSETTING_AUTO_SOC_UV = 8, + SMU_14_0_2_ODSETTING_COUNT = 9, +}; +#define SMU_14_0_2_MAX_ODSETTING 64 // Maximum Number of ODSettings + +enum SMU_14_0_2_PWRMODE_SETTING +{ + SMU_14_0_2_PMSETTING_POWER_LIMIT_QUIET = 0, + SMU_14_0_2_PMSETTING_POWER_LIMIT_BALANCE, + SMU_14_0_2_PMSETTING_POWER_LIMIT_TURBO, + SMU_14_0_2_PMSETTING_POWER_LIMIT_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TEMP_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_TARGET_RPM_RAGE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_QUIET, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO, + SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE, +}; +#define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings + +enum SMU_14_0_2_overdrive_table_id +{ + SMU_14_0_2_OVERDRIVE_TABLE_BASIC = 0, + SMU_14_0_2_OVERDRIVE_TABLE_ADVANCED = 1, + SMU_14_0_2_OVERDRIVE_TABLE_COUNT = 2, +}; + +struct smu_14_0_2_overdrive_table +{ + uint8_t revision; // Revision = SMU_14_0_2_PP_OVERDRIVE_VERSION + uint8_t reserve[3]; // Zero filled 
field reserved for future use + uint8_t cap[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODFEATURE]; // OD feature support flags + int32_t max[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // maximum settings + int32_t min[SMU_14_0_2_OVERDRIVE_TABLE_COUNT][SMU_14_0_2_MAX_ODSETTING]; // minimum settings + int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings +}; + +struct smu_14_0_2_powerplay_table +{ + struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen. + uint8_t table_revision; // PPGen use only: table_revision = 3 + uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t). + uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t) + uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t. + uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable. + uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t. + uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable. + uint16_t pmfw_board_table_size; // The size of BoardTable_t. + uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable. + uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t. + uint32_t golden_pp_id; // PPGen use only: PP Table ID on the Golden Data Base + uint32_t golden_revision; // PPGen use only: PP Table Revision on the Golden Data Base + uint16_t format_id; // PPGen use only: PPTable for different ASICs. + uint32_t platform_caps; // POWERPLAYTABLE::ulPlatformCaps + + uint8_t thermal_controller_type; // one of smu_14_0_2_PP_THERMALCONTROLLER + + uint16_t small_power_limit1; + uint16_t small_power_limit2; + uint16_t boost_power_limit; // For Gemini Board, when the slave adapter is in BACO mode, the master adapter will use this boost power limit instead of the default power limit to boost the power limit. + uint16_t software_shutdown_temp; + + uint8_t reserve[143]; // Zero filled field reserved for future use + + struct smu_14_0_2_overdrive_table overdrive_table; + + PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes +}; + +#pragma pack(pop) + +#endif -- cgit v1.2.3-70-g09d2 From 3e55845c3983d92e28517a545e403b5eb9acf95b Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Tue, 25 Apr 2023 16:53:32 +0800 Subject: drm/amd/swsmu: add smu v14_0_2 support Add initial support for smu v14_0_2. 
v2: fix warnings (Alex) v3: squash in various fixes (Alex) v4: squash in various fixes (Alex) v5: remove hardcoded pptable id (Alex) v6: update fw version (Alex) v7: squash in more updates (Alex) v8: rebase, squash in pptable override updates, combo table updates, SW CTF support (Alex) Signed-off-by: Kenneth Feng Signed-off-by: Likun Gao Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile | 2 +- drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c | 40 +- .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 1796 ++++++++++++++++++++ .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h | 28 + 5 files changed, 1864 insertions(+), 4 deletions(-) create mode 100644 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c create mode 100644 drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index be5b24d3dabd..1fc4557e6fb4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -28,7 +28,7 @@ #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6 -#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1 +#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x25 #define FEATURE_MASK(feature) (1ULL << feature) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile index ddbac5c655f7..4593e29e8ff8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. 
-SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o +SMU14_MGR = smu_v14_0.o smu_v14_0_0_ppt.o smu_v14_0_2_ppt.o AMD_SWSMU_SMU14MGR = $(addprefix $(AMD_SWSMU_PATH)/smu14/,$(SMU14_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 33fc0331fac2..3bc9662fbd28 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -172,6 +172,10 @@ int smu_v14_0_init_pptable_microcode(struct smu_context *smu) if (!adev->scpm_enabled) return 0; + if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 2)) || + (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 3))) + return 0; + /* override pptable_id from driver parameter */ if (amdgpu_smu_pptable_id >= 0) { pptable_id = amdgpu_smu_pptable_id; @@ -245,6 +249,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1; break; case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2; break; default: @@ -895,11 +900,32 @@ static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, return 0; } +#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */ +#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */ + static int smu_v14_0_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - // TODO + struct smu_context *smu = adev->powerplay.pp_handle; + uint32_t client_id = entry->client_id; + uint32_t src_id = entry->src_id; + + if (client_id == SOC15_IH_CLIENTID_THM) { + switch (src_id) { + case THM_11_0__SRCID__THM_DIG_THERM_L2H: + schedule_delayed_work(&smu->swctf_delayed_work, + msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY)); + break; + case THM_11_0__SRCID__THM_DIG_THERM_H2L: + dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n"); + break; + default: + dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n", + src_id); + break; + } + } return 0; } @@ -921,7 +947,17 @@ int smu_v14_0_register_irq_handler(struct smu_context *smu) irq_src->num_types = 1; irq_src->funcs = &smu_v14_0_irq_funcs; - // TODO: THM related + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_L2H, + irq_src); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM, + THM_11_0__SRCID__THM_DIG_THERM_H2L, + irq_src); + if (ret) + return ret; ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, SMU_IH_INTERRUPT_ID_TO_DRIVER, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c new file mode 100644 index 000000000000..706265220292 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -0,0 +1,1796 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#define SWSMU_CODE_LAYER_L2 + +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_smu.h" +#include "atomfirmware.h" +#include "amdgpu_atomfirmware.h" +#include "amdgpu_atombios.h" +#include "smu_v14_0.h" +#include "smu14_driver_if_v14_0.h" +#include "soc15_common.h" +#include "atom.h" +#include "smu_v14_0_2_ppt.h" +#include "smu_v14_0_2_pptable.h" +#include "smu_v14_0_2_ppsmc.h" +#include "mp/mp_14_0_2_offset.h" +#include "mp/mp_14_0_2_sh_mask.h" + +#include "smu_cmn.h" +#include "amdgpu_ras.h" + +/* + * DO NOT use these for err/warn/info/debug messages. + * Use dev_err, dev_warn, dev_info and dev_dbg instead. + * They are more MGPU friendly. + */ +#undef pr_err +#undef pr_warn +#undef pr_info +#undef pr_debug + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) + +#define FEATURE_MASK(feature) (1ULL << feature) +#define SMC_DPM_FEATURE ( \ + FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_LINK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | \ + FEATURE_MASK(FEATURE_DPM_FCLK_BIT)) + +#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE 0x4000 + +static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = { + MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), + MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), + MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), + MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), + MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), + MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), + MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 1), + MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), + MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), + MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), + MSG_MAP(SetToolsDramAddrHigh, 
PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0), + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), + MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), + MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), + MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), + MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), + MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), + MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), + MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0), + MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0), + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), + MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), + MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), + MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), + MSG_MAP(SetNumBadMemoryPagesRetired, PPSMC_MSG_SetNumBadMemoryPagesRetired, 0), + MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel, + PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, 0), + MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0), + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), +}; + +static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = { + CLK_MAP(GFXCLK, PPCLK_GFXCLK), + CLK_MAP(SCLK, PPCLK_GFXCLK), + CLK_MAP(SOCCLK, PPCLK_SOCCLK), + CLK_MAP(FCLK, PPCLK_FCLK), + CLK_MAP(UCLK, PPCLK_UCLK), + CLK_MAP(MCLK, PPCLK_UCLK), + CLK_MAP(VCLK, PPCLK_VCLK_0), + CLK_MAP(DCLK, PPCLK_DCLK_0), +}; + +static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = { + FEA_MAP(FW_DATA_READ), + FEA_MAP(DPM_GFXCLK), + FEA_MAP(DPM_GFX_POWER_OPTIMIZER), + FEA_MAP(DPM_UCLK), + FEA_MAP(DPM_FCLK), + FEA_MAP(DPM_SOCCLK), + FEA_MAP(DPM_LINK), + FEA_MAP(DPM_DCN), + FEA_MAP(VMEMP_SCALING), + FEA_MAP(VDDIO_MEM_SCALING), + FEA_MAP(DS_GFXCLK), + FEA_MAP(DS_SOCCLK), + FEA_MAP(DS_FCLK), + FEA_MAP(DS_LCLK), + FEA_MAP(DS_DCFCLK), + FEA_MAP(DS_UCLK), + FEA_MAP(GFX_ULV), + FEA_MAP(FW_DSTATE), + FEA_MAP(GFXOFF), + FEA_MAP(BACO), + FEA_MAP(MM_DPM), + FEA_MAP(SOC_MPCLK_DS), + FEA_MAP(BACO_MPCLK_DS), + FEA_MAP(THROTTLERS), + FEA_MAP(SMARTSHIFT), + FEA_MAP(GTHR), + FEA_MAP(ACDC), + FEA_MAP(VR0HOT), + FEA_MAP(FW_CTF), + FEA_MAP(FAN_CONTROL), + FEA_MAP(GFX_DCS), + FEA_MAP(GFX_READ_MARGIN), + FEA_MAP(LED_DISPLAY), + FEA_MAP(GFXCLK_SPREAD_SPECTRUM), + FEA_MAP(OUT_OF_BAND_MONITOR), + FEA_MAP(OPTIMIZED_VMIN), + FEA_MAP(GFX_IMU), + FEA_MAP(BOOT_TIME_CAL), + FEA_MAP(GFX_PCC_DFLL), + 
FEA_MAP(SOC_CG), + FEA_MAP(DF_CSTATE), + FEA_MAP(GFX_EDC), + FEA_MAP(BOOT_POWER_OPT), + FEA_MAP(CLOCK_POWER_DOWN_BYPASS), + FEA_MAP(DS_VCN), + FEA_MAP(BACO_CG), + FEA_MAP(MEM_TEMP_READ), + FEA_MAP(ATHUB_MMHUB_PG), + FEA_MAP(SOC_PCC), + [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT}, + [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT}, +}; + +static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = { + TAB_MAP(PPTABLE), + TAB_MAP(WATERMARKS), + TAB_MAP(AVFS_PSM_DEBUG), + TAB_MAP(PMSTATUSLOG), + TAB_MAP(SMU_METRICS), + TAB_MAP(DRIVER_SMU_CONFIG), + TAB_MAP(ACTIVITY_MONITOR_COEFF), + [SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE}, + TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), +}; + +static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { + PWR_MAP(AC), + PWR_MAP(DC), +}; + +static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = { + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT, WORKLOAD_PPLIB_DEFAULT_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING, WORKLOAD_PPLIB_POWER_SAVING_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT), + WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT), +}; + +#if 0 +static const uint8_t smu_v14_0_2_throttler_map[] = { + [THROTTLER_PPT0_BIT] = (SMU_THROTTLER_PPT0_BIT), + [THROTTLER_PPT1_BIT] = (SMU_THROTTLER_PPT1_BIT), + [THROTTLER_PPT2_BIT] = (SMU_THROTTLER_PPT2_BIT), + [THROTTLER_PPT3_BIT] = (SMU_THROTTLER_PPT3_BIT), + [THROTTLER_TDC_GFX_BIT] = (SMU_THROTTLER_TDC_GFX_BIT), + [THROTTLER_TDC_SOC_BIT] = (SMU_THROTTLER_TDC_SOC_BIT), + [THROTTLER_TEMP_EDGE_BIT] = (SMU_THROTTLER_TEMP_EDGE_BIT), + [THROTTLER_TEMP_HOTSPOT_BIT] = (SMU_THROTTLER_TEMP_HOTSPOT_BIT), + [THROTTLER_TEMP_MEM_BIT] = (SMU_THROTTLER_TEMP_MEM_BIT), + [THROTTLER_TEMP_VR_GFX_BIT] = (SMU_THROTTLER_TEMP_VR_GFX_BIT), + [THROTTLER_TEMP_VR_SOC_BIT] = (SMU_THROTTLER_TEMP_VR_SOC_BIT), + [THROTTLER_TEMP_VR_MEM0_BIT] = (SMU_THROTTLER_TEMP_VR_MEM0_BIT), + [THROTTLER_TEMP_VR_MEM1_BIT] = (SMU_THROTTLER_TEMP_VR_MEM1_BIT), + [THROTTLER_TEMP_LIQUID0_BIT] = (SMU_THROTTLER_TEMP_LIQUID0_BIT), + [THROTTLER_TEMP_LIQUID1_BIT] = (SMU_THROTTLER_TEMP_LIQUID1_BIT), + [THROTTLER_GFX_APCC_PLUS_BIT] = (SMU_THROTTLER_APCC_BIT), + [THROTTLER_FIT_BIT] = (SMU_THROTTLER_FIT_BIT), +}; +#endif + +static int +smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num) +{ + struct amdgpu_device *adev = smu->adev; + /*u32 smu_version;*/ + + if (num > 2) + return -EINVAL; + + memset(feature_mask, 0xff, sizeof(uint32_t) * num); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) { + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT); + } +#if 0 + if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) || + !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT); + + if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); + + /* PMFW 78.58 contains a critical fix for gfxoff feature */ + smu_cmn_get_smc_version(smu, NULL, 
&smu_version); + if ((smu_version < 0x004e3a00) || + !(adev->pm.pp_feature & PP_GFXOFF_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT); + + if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT); + } + + if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); + + if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT); + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT); + } + + if (!(adev->pm.pp_feature & PP_ULV_MASK)) + *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT); +#endif + + return 0; +} + +static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + struct smu_baco_context *smu_baco = &smu->smu_baco; + PPTable_t *pptable = smu->smu_table.driver_pptable; + const OverDriveLimits_t * const overdrive_upperlimits = + &pptable->SkuTable.OverDriveLimitsBasicMax; + const OverDriveLimits_t * const overdrive_lowerlimits = + &pptable->SkuTable.OverDriveLimitsBasicMin; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC) + smu->dc_controlled_by_gpio = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) { + smu_baco->platform_support = true; + + if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO) + smu_baco->maco_support = true; + } + + if (!overdrive_lowerlimits->FeatureCtrlMask || + !overdrive_upperlimits->FeatureCtrlMask) + smu->od_enabled = false; + + table_context->thermal_controller_type = + powerplay_table->thermal_controller_type; + + /* + * Instead of having its own buffer space and get overdrive_table copied, + * smu->od_settings just points to the actual overdrive_table + */ + smu->od_settings = &powerplay_table->overdrive_table; + + smu->adev->pm.no_fan = + !(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT)); + + return 0; +} + +static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; + + memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable, + sizeof(PPTable_t)); + + return 0; +} + +#ifndef atom_smc_dpm_info_table_14_0_0 +struct atom_smc_dpm_info_table_14_0_0 { + struct atom_common_table_header table_header; + BoardTable_t BoardTable; +}; +#endif + +static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table; + BoardTable_t *BoardTable = &smc_pptable->BoardTable; + int index, ret; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + smc_dpm_info); + + ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL, + (uint8_t **)&smc_dpm_table); + if (ret) + return ret; + + memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t)); + + return 0; +} + +#if 0 +static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, + void 
**table, + uint32_t *size) +{ + struct smu_table_context *smu_table = &smu->smu_table; + void *combo_pptable = smu_table->combo_pptable; + int ret = 0; + + ret = smu_cmn_get_combo_pptable(smu); + if (ret) + return ret; + + *table = combo_pptable; + *size = sizeof(struct smu_14_0_powerplay_table); + + return 0; +} +#endif + +static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu, + void **table, + uint32_t *size) +{ + struct smu_table_context *smu_table = &smu->smu_table; + void *combo_pptable = smu_table->combo_pptable; + int ret = 0; + + ret = smu_cmn_get_combo_pptable(smu); + if (ret) + return ret; + + *table = combo_pptable; + *size = sizeof(struct smu_14_0_2_powerplay_table); + + return 0; +} + +static int smu_v14_0_2_setup_pptable(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct amdgpu_device *adev = smu->adev; + int ret = 0; + + if (amdgpu_sriov_vf(smu->adev)) + return 0; + + if (!adev->scpm_enabled) + ret = smu_v14_0_setup_pptable(smu); + else + ret = smu_v14_0_2_get_pptable_from_pmfw(smu, + &smu_table->power_play_table, + &smu_table->power_play_table_size); + if (ret) + return ret; + + ret = smu_v14_0_2_store_powerplay_table(smu); + if (ret) + return ret; + + /* + * With SCPM enabled, the operation below will be handled + * by PSP. Driver involvment is unnecessary and useless. + */ + if (!adev->scpm_enabled) { + ret = smu_v14_0_2_append_powerplay_table(smu); + if (ret) + return ret; + } + + ret = smu_v14_0_2_check_powerplay_table(smu); + if (ret) + return ret; + + return ret; +} + +static int smu_v14_0_2_tables_init(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *tables = smu_table->tables; + + SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, + sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + + smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); + if (!smu_table->metrics_table) + goto err0_out; + smu_table->metrics_time = 0; + + smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3); + smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL); + if (!smu_table->gpu_metrics_table) + goto err1_out; + + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); + if (!smu_table->watermarks_table) + goto err2_out; + + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + goto err3_out; + + return 0; + +err3_out: + kfree(smu_table->watermarks_table); +err2_out: + kfree(smu_table->gpu_metrics_table); +err1_out: + kfree(smu_table->metrics_table); +err0_out: + return -ENOMEM; +} + +static int smu_v14_0_2_allocate_dpm_context(struct 
smu_context *smu) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + + smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context), + GFP_KERNEL); + if (!smu_dpm->dpm_context) + return -ENOMEM; + + smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context); + + return 0; +} + +static int smu_v14_0_2_init_smc_tables(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_v14_0_2_tables_init(smu); + if (ret) + return ret; + + ret = smu_v14_0_2_allocate_dpm_context(smu); + if (ret) + return ret; + + return smu_v14_0_init_smc_tables(smu); +} + +static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) +{ + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + struct smu_14_0_dpm_table *dpm_table; + struct smu_14_0_pcie_table *pcie_table; + uint32_t link_level; + int ret = 0; + + /* socclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.soc_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_SOCCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* gfxclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_GFXCLK, + dpm_table); + if (ret) + return ret; + + /* + * Update the reported maximum shader clock to the value + * which can be guarded to be achieved on all cards. This + * is aligned with Window setting. And considering that value + * might be not the peak frequency the card can achieve, it + * is normal some real-time clock frequency can overtake this + * labelled maximum clock frequency(for example in pp_dpm_sclk + * sysfs output). 
+ */ + if (skutable->DriverReportedClocks.GameClockAc && + (dpm_table->dpm_levels[dpm_table->count - 1].value > + skutable->DriverReportedClocks.GameClockAc)) { + dpm_table->dpm_levels[dpm_table->count - 1].value = + skutable->DriverReportedClocks.GameClockAc; + dpm_table->max = skutable->DriverReportedClocks.GameClockAc; + } + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* uclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_UCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* fclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_FCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* vclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_VCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* dclk dpm table setup */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) { + ret = smu_v14_0_set_single_dpm_table(smu, + SMU_DCLK, + dpm_table); + if (ret) + return ret; + } else { + dpm_table->count = 1; + dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100; + dpm_table->dpm_levels[0].enabled = true; + dpm_table->min = dpm_table->dpm_levels[0].value; + dpm_table->max = dpm_table->dpm_levels[0].value; + } + + /* lclk dpm table setup */ + pcie_table = &dpm_context->dpm_tables.pcie_table; + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + return 0; +} + +static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu) +{ + int ret = 0; + uint64_t feature_enabled; + + ret = smu_cmn_get_enabled_mask(smu, &feature_enabled); + if (ret) + return false; + + return !!(feature_enabled & SMC_DPM_FEATURE); +} + +static void 
smu_v14_0_2_dump_pptable(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + PFE_Settings_t *PFEsettings = &pptable->PFE_Settings; + + dev_info(smu->adev->dev, "Dumped PPTable:\n"); + + dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version); + dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]); + dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]); +} + +static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics) +{ + uint32_t throttler_status = 0; + int i; + + for (i = 0; i < THROTTLER_COUNT; i++) + throttler_status |= + (metrics->ThrottlingPercentage[i] ? 1U << i : 0); + + return throttler_status; +} + +#define SMU_14_0_2_BUSY_THRESHOLD 5 +static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu, + MetricsMember_t member, + uint32_t *value) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + int ret = 0; + + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) + return ret; + + switch (member) { + case METRICS_CURR_GFXCLK: + *value = metrics->CurrClock[PPCLK_GFXCLK]; + break; + case METRICS_CURR_SOCCLK: + *value = metrics->CurrClock[PPCLK_SOCCLK]; + break; + case METRICS_CURR_UCLK: + *value = metrics->CurrClock[PPCLK_UCLK]; + break; + case METRICS_CURR_VCLK: + *value = metrics->CurrClock[PPCLK_VCLK_0]; + break; + case METRICS_CURR_DCLK: + *value = metrics->CurrClock[PPCLK_DCLK_0]; + break; + case METRICS_CURR_FCLK: + *value = metrics->CurrClock[PPCLK_FCLK]; + break; + case METRICS_CURR_DCEFCLK: + *value = metrics->CurrClock[PPCLK_DCFCLK]; + break; + case METRICS_AVERAGE_GFXCLK: + if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageGfxclkFrequencyPostDs; + else + *value = metrics->AverageGfxclkFrequencyPreDs; + break; + case METRICS_AVERAGE_FCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageFclkFrequencyPostDs; + else + *value = metrics->AverageFclkFrequencyPreDs; + break; + case METRICS_AVERAGE_UCLK: + if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD) + *value = metrics->AverageMemclkFrequencyPostDs; + else + *value = metrics->AverageMemclkFrequencyPreDs; + break; + case METRICS_AVERAGE_VCLK: + *value = metrics->AverageVclk0Frequency; + break; + case METRICS_AVERAGE_DCLK: + *value = metrics->AverageDclk0Frequency; + break; + case METRICS_AVERAGE_VCLK1: + *value = metrics->AverageVclk1Frequency; + break; + case METRICS_AVERAGE_DCLK1: + *value = metrics->AverageDclk1Frequency; + break; + case METRICS_AVERAGE_GFXACTIVITY: + *value = metrics->AverageGfxActivity; + break; + case METRICS_AVERAGE_MEMACTIVITY: + *value = metrics->AverageUclkActivity; + break; + case METRICS_AVERAGE_SOCKETPOWER: + *value = metrics->AverageSocketPower << 8; + break; + case METRICS_TEMPERATURE_EDGE: + *value = metrics->AvgTemperature[TEMP_EDGE] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_HOTSPOT: + *value = metrics->AvgTemperature[TEMP_HOTSPOT] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_MEM: + *value = metrics->AvgTemperature[TEMP_MEM] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_TEMPERATURE_VRGFX: + *value = metrics->AvgTemperature[TEMP_VR_GFX] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case 
METRICS_TEMPERATURE_VRSOC: + *value = metrics->AvgTemperature[TEMP_VR_SOC] * + SMU_TEMPERATURE_UNITS_PER_CENTIGRADES; + break; + case METRICS_THROTTLER_STATUS: + *value = smu_v14_0_2_get_throttler_status(metrics); + break; + case METRICS_CURR_FANSPEED: + *value = metrics->AvgFanRpm; + break; + case METRICS_CURR_FANPWM: + *value = metrics->AvgFanPwm; + break; + case METRICS_VOLTAGE_VDDGFX: + *value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX]; + break; + case METRICS_PCIE_RATE: + *value = metrics->PcieRate; + break; + case METRICS_PCIE_WIDTH: + *value = metrics->PcieWidth; + break; + default: + *value = UINT_MAX; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + struct smu_14_0_dpm_context *dpm_context = + smu->smu_dpm.dpm_context; + struct smu_14_0_dpm_table *dpm_table; + + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + /* uclk dpm table */ + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + /* gfxclk dpm table */ + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + /* socclk dpm table */ + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + /* fclk dpm table */ + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + case SMU_VCLK1: + /* vclk dpm table */ + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + case SMU_DCLK1: + /* dclk dpm table */ + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + dev_err(smu->adev->dev, "Unsupported clock type!\n"); + return -EINVAL; + } + + if (min) + *min = dpm_table->min; + if (max) + *max = dpm_table->max; + + return 0; +} + +static int smu_v14_0_2_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, + uint32_t *size) +{ + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *smc_pptable = table_context->driver_pptable; + int ret = 0; + + switch (sensor) { + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm; + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_MEMACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXACTIVITY, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GPU_AVG_POWER: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_SOCKETPOWER, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_HOTSPOT_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_HOTSPOT, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_EDGE_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_EDGE, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_MEM_TEMP: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_TEMPERATURE_MEM, + (uint32_t *)data); + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_CURR_UCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + METRICS_AVERAGE_GFXCLK, + (uint32_t *)data); + *(uint32_t *)data *= 100; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VDDGFX: + ret = smu_v14_0_2_get_smu_metrics_data(smu, + 
METRICS_VOLTAGE_VDDGFX, + (uint32_t *)data); + *size = 4; + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value) +{ + MetricsMember_t member_type; + int clk_id = 0; + + clk_id = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_CLK, + clk_type); + if (clk_id < 0) + return -EINVAL; + + switch (clk_id) { + case PPCLK_GFXCLK: + member_type = METRICS_AVERAGE_GFXCLK; + break; + case PPCLK_UCLK: + member_type = METRICS_CURR_UCLK; + break; + case PPCLK_FCLK: + member_type = METRICS_CURR_FCLK; + break; + case PPCLK_SOCCLK: + member_type = METRICS_CURR_SOCCLK; + break; + case PPCLK_VCLK_0: + member_type = METRICS_AVERAGE_VCLK; + break; + case PPCLK_DCLK_0: + member_type = METRICS_AVERAGE_DCLK; + break; + default: + return -EINVAL; + } + + return smu_v14_0_2_get_smu_metrics_data(smu, + member_type, + value); +} + +static int smu_v14_0_2_print_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + char *buf) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_14_0_dpm_table *single_dpm_table; + int i, curr_freq, size = 0; + int ret = 0; + + smu_cmn_get_sysfs_buf(&buf, &size); + + if (amdgpu_ras_intr_triggered()) { + size += sysfs_emit_at(buf, size, "unavailable\n"); + return size; + } + + switch (clk_type) { + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + break; + case SMU_MCLK: + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + break; + case SMU_SOCCLK: + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + break; + case SMU_FCLK: + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + break; + case SMU_VCLK: + case SMU_VCLK1: + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + break; + case SMU_DCLK: + case SMU_DCLK1: + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + break; + default: + break; + } + + switch (clk_type) { + case SMU_SCLK: + case SMU_MCLK: + case SMU_SOCCLK: + case SMU_FCLK: + case SMU_VCLK: + case SMU_VCLK1: + case SMU_DCLK: + case SMU_DCLK1: + ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq); + if (ret) { + dev_err(smu->adev->dev, "Failed to get current clock freq!"); + return ret; + } + + if (single_dpm_table->is_fine_grained) { + /* + * For fine grained dpms, there are only two dpm levels: + * - level 0 -> min clock freq + * - level 1 -> max clock freq + * And the current clock frequency can be any value between them. + * So, if the current clock frequency is not at level 0 or level 1, + * we will fake it as three dpm levels: + * - level 0 -> min clock freq + * - level 1 -> current actual clock freq + * - level 2 -> max clock freq + */ + if ((single_dpm_table->dpm_levels[0].value != curr_freq) && + (single_dpm_table->dpm_levels[1].value != curr_freq)) { + size += sysfs_emit_at(buf, size, "0: %uMhz\n", + single_dpm_table->dpm_levels[0].value); + size += sysfs_emit_at(buf, size, "1: %uMhz *\n", + curr_freq); + size += sysfs_emit_at(buf, size, "2: %uMhz\n", + single_dpm_table->dpm_levels[1].value); + } else { + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + single_dpm_table->dpm_levels[0].value, + single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + single_dpm_table->dpm_levels[1].value, + single_dpm_table->dpm_levels[1].value == curr_freq ? 
"*" : ""); + } + } else { + for (i = 0; i < single_dpm_table->count; i++) + size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + i, single_dpm_table->dpm_levels[i].value, + single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : ""); + } + break; + case SMU_PCIE: + // TODO + break; + + default: + break; + } + + return size; +} + +static int smu_v14_0_2_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask) +{ + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct smu_14_0_dpm_table *single_dpm_table; + uint32_t soft_min_level, soft_max_level; + uint32_t min_freq, max_freq; + int ret = 0; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + break; + case SMU_MCLK: + case SMU_UCLK: + single_dpm_table = &(dpm_context->dpm_tables.uclk_table); + break; + case SMU_SOCCLK: + single_dpm_table = &(dpm_context->dpm_tables.soc_table); + break; + case SMU_FCLK: + single_dpm_table = &(dpm_context->dpm_tables.fclk_table); + break; + case SMU_VCLK: + case SMU_VCLK1: + single_dpm_table = &(dpm_context->dpm_tables.vclk_table); + break; + case SMU_DCLK: + case SMU_DCLK1: + single_dpm_table = &(dpm_context->dpm_tables.dclk_table); + break; + default: + break; + } + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + case SMU_MCLK: + case SMU_UCLK: + case SMU_SOCCLK: + case SMU_FCLK: + case SMU_VCLK: + case SMU_VCLK1: + case SMU_DCLK: + case SMU_DCLK1: + if (single_dpm_table->is_fine_grained) { + /* There is only 2 levels for fine grained DPM */ + soft_max_level = (soft_max_level >= 1 ? 1 : 0); + soft_min_level = (soft_min_level >= 1 ? 
1 : 0); + } else { + if ((soft_max_level >= single_dpm_table->count) || + (soft_min_level >= single_dpm_table->count)) + return -EINVAL; + } + + min_freq = single_dpm_table->dpm_levels[soft_min_level].value; + max_freq = single_dpm_table->dpm_levels[soft_max_level].value; + + ret = smu_v14_0_set_soft_freq_limited_range(smu, + clk_type, + min_freq, + max_freq); + break; + case SMU_DCEFCLK: + case SMU_PCIE: + default: + break; + } + + return ret; +} + +static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_14_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + uint32_t smu_pcie_arg; + int ret, i; + + for (i = 0; i < pcie_table->num_of_link_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap) + pcie_table->pcie_gen[i] = pcie_gen_cap; + if (pcie_table->pcie_lane[i] > pcie_width_cap) + pcie_table->pcie_lane[i] = pcie_width_cap; + + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + return ret; + } + + return 0; +} + +static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu, + struct smu_temperature_range *range) +{ + // TODO + + return 0; +} + +static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu) +{ + // TODO + + return 0; +} + +static void smu_v14_0_2_get_unique_id(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + SmuMetrics_t *metrics = + &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics); + struct amdgpu_device *adev = smu->adev; + uint32_t upper32 = 0, lower32 = 0; + int ret; + + ret = smu_cmn_get_metrics_table(smu, NULL, false); + if (ret) + goto out; + + upper32 = metrics->PublicSerialNumberUpper; + lower32 = metrics->PublicSerialNumberLower; + +out: + adev->unique_id = ((uint64_t)upper32 << 32) | lower32; +} + +static int smu_v14_0_2_get_power_limit(struct smu_context *smu, + uint32_t *current_power_limit, + uint32_t *default_power_limit, + uint32_t *max_power_limit, + uint32_t *min_power_limit) +{ + // TODO + + return 0; +} + +static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu, + char *buf) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + static const char *title[] = { + "PROFILE_INDEX(NAME)", + "CLOCK_TYPE(NAME)", + "FPS", + "MinActiveFreqType", + "MinActiveFreq", + "BoosterFreqType", + "BoosterFreq", + "PD_Data_limit_c", + "PD_Data_error_coeff", + "PD_Data_error_rate_coeff"}; + int16_t workload_type = 0; + uint32_t i, size = 0; + int result = 0; + + if (!buf) + return -EINVAL; + + size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n", + title[0], title[1], title[2], title[3], title[4], title[5], + title[6], title[7], title[8], title[9]); + + for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) { + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + i); + if (workload_type == -ENOTSUPP) + continue; + else if (workload_type < 0) + return -EINVAL; + + result = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + workload_type, + (void *)(&activity_monitor_external), + false); + if 
(result) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return result; + } + + size += sysfs_emit_at(buf, size, "%2d %14s%s:\n", + i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " "); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 0, + "GFXCLK", + activity_monitor->Gfx_FPS, + activity_monitor->Gfx_MinActiveFreqType, + activity_monitor->Gfx_MinActiveFreq, + activity_monitor->Gfx_BoosterFreqType, + activity_monitor->Gfx_BoosterFreq, + activity_monitor->Gfx_PD_Data_limit_c, + activity_monitor->Gfx_PD_Data_error_coeff, + activity_monitor->Gfx_PD_Data_error_rate_coeff); + + size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n", + " ", + 1, + "FCLK", + activity_monitor->Fclk_FPS, + activity_monitor->Fclk_MinActiveFreqType, + activity_monitor->Fclk_MinActiveFreq, + activity_monitor->Fclk_BoosterFreqType, + activity_monitor->Fclk_BoosterFreq, + activity_monitor->Fclk_PD_Data_limit_c, + activity_monitor->Fclk_PD_Data_error_coeff, + activity_monitor->Fclk_PD_Data_error_rate_coeff); + } + + return size; +} + +static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, + long *input, + uint32_t size) +{ + DpmActivityMonitorCoeffIntExternal_t activity_monitor_external; + DpmActivityMonitorCoeffInt_t *activity_monitor = + &(activity_monitor_external.DpmActivityMonitorCoeffInt); + int workload_type, ret = 0; + + smu->power_profile_mode = input[size]; + + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { + dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); + return -EINVAL; + } + + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + false); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__); + return ret; + } + + switch (input[0]) { + case 0: /* Gfxclk */ + activity_monitor->Gfx_FPS = input[1]; + activity_monitor->Gfx_MinActiveFreqType = input[2]; + activity_monitor->Gfx_MinActiveFreq = input[3]; + activity_monitor->Gfx_BoosterFreqType = input[4]; + activity_monitor->Gfx_BoosterFreq = input[5]; + activity_monitor->Gfx_PD_Data_limit_c = input[6]; + activity_monitor->Gfx_PD_Data_error_coeff = input[7]; + activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8]; + break; + case 1: /* Fclk */ + activity_monitor->Fclk_FPS = input[1]; + activity_monitor->Fclk_MinActiveFreqType = input[2]; + activity_monitor->Fclk_MinActiveFreq = input[3]; + activity_monitor->Fclk_BoosterFreqType = input[4]; + activity_monitor->Fclk_BoosterFreq = input[5]; + activity_monitor->Fclk_PD_Data_limit_c = input[6]; + activity_monitor->Fclk_PD_Data_error_coeff = input[7]; + activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8]; + break; + } + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + WORKLOAD_PPLIB_CUSTOM_BIT, + (void *)(&activity_monitor_external), + true); + if (ret) { + dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__); + return ret; + } + } + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + smu->power_profile_mode); + if (workload_type < 0) + return -EINVAL; + + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetWorkloadMask, + 1 << workload_type, + NULL); +} + +static int smu_v14_0_2_baco_enter(struct 
smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v14_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v14_0_baco_enter(smu); +} + +static int smu_v14_0_2_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v14_0_baco_exit(smu); + } +} + +static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu) +{ + // TODO + + return true; +} + +static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msg, int num_msgs) +{ + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = &smu_table->driver_table; + SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; + int i, j, r, c; + u16 dir; + + if (!adev->pm.dpm_enabled) + return -EBUSY; + + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + req->I2CcontrollerPort = smu_i2c->port; + req->I2CSpeed = I2C_SPEED_FAST_400K; + req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ + dir = msg[0].flags & I2C_M_RD; + + for (c = i = 0; i < num_msgs; i++) { + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[c]; + + if (!(msg[i].flags & I2C_M_RD)) { + /* write */ + cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK; + cmd->ReadWriteData = msg[i].buf[j]; + } + + if ((dir ^ msg[i].flags) & I2C_M_RD) { + /* The direction changes. + */ + dir = msg[i].flags & I2C_M_RD; + cmd->CmdConfig |= CMDCONFIG_RESTART_MASK; + } + + req->NumCmds++; + + /* + * Insert STOP if we are at the last byte of either last + * message for the transaction or the client explicitly + * requires a STOP at this particular message. 
+ */ + if ((j == msg[i].len - 1) && + ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) { + cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK; + cmd->CmdConfig |= CMDCONFIG_STOP_MASK; + } + } + } + mutex_lock(&adev->pm.mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&adev->pm.mutex); + if (r) + goto fail; + + for (c = i = 0; i < num_msgs; i++) { + if (!(msg[i].flags & I2C_M_RD)) { + c += msg[i].len; + continue; + } + for (j = 0; j < msg[i].len; j++, c++) { + SwI2cCmd_t *cmd = &res->SwI2cCmds[c]; + + msg[i].buf[j] = cmd->ReadWriteData; + } + } + r = num_msgs; +fail: + kfree(req); + return r; +} + +static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm smu_v14_0_2_i2c_algo = { + .master_xfer = smu_v14_0_2_i2c_xfer, + .functionality = smu_v14_0_2_i2c_func, +}; + +static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = { + .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN, + .max_read_len = MAX_SW_I2C_COMMANDS, + .max_write_len = MAX_SW_I2C_COMMANDS, + .max_comb_1st_msg_len = 2, + .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, +}; + +static int smu_v14_0_2_i2c_control_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->class = I2C_CLASS_SPD; + control->dev.parent = &adev->pdev->dev; + control->algo = &smu_v14_0_2_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + control->quirks = &smu_v14_0_2_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } + + /* assign the buses used for the FRU EEPROM and RAS EEPROM */ + /* XXX ideally this would be something in a vbios data table */ + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + + return 0; +Out_err: + for ( ; i >= 0; i--) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + return res; +} + +static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; +} + +static int smu_v14_0_2_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + int ret; + + switch (mp1_state) { + case PP_MP1_STATE_UNLOAD: + ret = smu_cmn_set_mp1_state(smu, mp1_state); + break; + default: + /* Ignore others */ + ret = 0; + } + + return ret; +} + +static int smu_v14_0_2_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + +static int smu_v14_0_2_mode1_reset(struct smu_context *smu) +{ + int ret = 0; + + // TODO + + return ret; +} + +static int smu_v14_0_2_mode2_reset(struct smu_context *smu) +{ + int ret = 0; + + // 
TODO + + return ret; +} + +static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2)) + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures, + FEATURE_PWR_GFX, NULL); + else + return -EOPNOTSUPP; +} + +static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82); + smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66); + smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90); +} + +static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu, + uint32_t size) +{ + int ret = 0; + + /* message SMU to update the bad page number on SMUBUS */ + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetNumBadMemoryPagesRetired, + size, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to message SMU to update bad memory pages number\n", + __func__); + + return ret; +} + +static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu, + uint32_t size) +{ + int ret = 0; + + /* message SMU to update the bad channel info on SMUBUS */ + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel, + size, NULL); + if (ret) + dev_err(smu->adev->dev, + "[%s] failed to message SMU to update bad memory pages channel info\n", + __func__); + + return ret; +} + +static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu, + void *table) +{ + int ret = 0; + + // TODO + + return ret; +} + +static const struct pptable_funcs smu_v14_0_2_ppt_funcs = { + .get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask, + .set_default_dpm_table = smu_v14_0_2_set_default_dpm_table, + .i2c_init = smu_v14_0_2_i2c_control_init, + .i2c_fini = smu_v14_0_2_i2c_control_fini, + .is_dpm_running = smu_v14_0_2_is_dpm_running, + .dump_pptable = smu_v14_0_2_dump_pptable, + .init_microcode = smu_v14_0_init_microcode, + .load_microcode = smu_v14_0_load_microcode, + .fini_microcode = smu_v14_0_fini_microcode, + .init_smc_tables = smu_v14_0_2_init_smc_tables, + .fini_smc_tables = smu_v14_0_fini_smc_tables, + .init_power = smu_v14_0_init_power, + .fini_power = smu_v14_0_fini_power, + .check_fw_status = smu_v14_0_check_fw_status, + .setup_pptable = smu_v14_0_2_setup_pptable, + .check_fw_version = smu_v14_0_check_fw_version, + .write_pptable = smu_cmn_write_pptable, + .set_driver_table_location = smu_v14_0_set_driver_table_location, + .system_features_control = smu_v14_0_system_features_control, + .set_allowed_mask = smu_v14_0_set_allowed_mask, + .get_enabled_mask = smu_cmn_get_enabled_mask, + .dpm_set_vcn_enable = smu_v14_0_set_vcn_enable, + .dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable, + .get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq, + .get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values, + .read_sensor = smu_v14_0_2_read_sensor, + .feature_is_enabled = smu_cmn_feature_is_enabled, + .print_clk_levels = smu_v14_0_2_print_clk_levels, + .force_clk_levels = smu_v14_0_2_force_clk_levels, + .update_pcie_parameters = smu_v14_0_2_update_pcie_parameters, + .get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range, + .register_irq_handler = smu_v14_0_register_irq_handler, + .notify_memory_pool_location = smu_v14_0_notify_memory_pool_location, + .set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range, + .init_pptable_microcode = 
smu_v14_0_init_pptable_microcode, + .populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk, + .set_performance_level = smu_v14_0_set_performance_level, + .gfx_off_control = smu_v14_0_gfx_off_control, + .get_unique_id = smu_v14_0_2_get_unique_id, + .get_power_limit = smu_v14_0_2_get_power_limit, + .set_power_limit = smu_v14_0_set_power_limit, + .set_power_source = smu_v14_0_set_power_source, + .get_power_profile_mode = smu_v14_0_2_get_power_profile_mode, + .set_power_profile_mode = smu_v14_0_2_set_power_profile_mode, + .run_btc = smu_v14_0_run_btc, + .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, + .set_pp_feature_mask = smu_cmn_set_pp_feature_mask, + .set_tool_table_location = smu_v14_0_set_tool_table_location, + .deep_sleep_control = smu_v14_0_deep_sleep_control, + .gfx_ulv_control = smu_v14_0_gfx_ulv_control, + .get_bamaco_support = smu_v14_0_get_bamaco_support, + .baco_get_state = smu_v14_0_baco_get_state, + .baco_set_state = smu_v14_0_baco_set_state, + .baco_enter = smu_v14_0_2_baco_enter, + .baco_exit = smu_v14_0_2_baco_exit, + .mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported, + .mode1_reset = smu_v14_0_2_mode1_reset, + .mode2_reset = smu_v14_0_2_mode2_reset, + .enable_gfx_features = smu_v14_0_2_enable_gfx_features, + .set_mp1_state = smu_v14_0_2_set_mp1_state, + .set_df_cstate = smu_v14_0_2_set_df_cstate, + .send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num, + .send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag, + .gpo_control = smu_v14_0_gpo_control, + .get_ecc_info = smu_v14_0_2_get_ecc_info, +}; + +void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu) +{ + smu->ppt_funcs = &smu_v14_0_2_ppt_funcs; + smu->message_map = smu_v14_0_2_message_map; + smu->clock_map = smu_v14_0_2_clk_map; + smu->feature_map = smu_v14_0_2_feature_mask_map; + smu->table_map = smu_v14_0_2_table_map; + smu->pwr_src_map = smu_v14_0_2_pwr_src_map; + smu->workload_map = smu_v14_0_2_workload_map; + smu_v14_0_2_set_smu_mailbox_registers(smu); +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h new file mode 100644 index 000000000000..b83729e5d6f9 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h @@ -0,0 +1,28 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU_V14_0_2_PPT_H__ +#define __SMU_V14_0_2_PPT_H__ + +extern void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu); + +#endif -- cgit v1.2.3-70-g09d2 From 6627d845ac33a105625044c8ce8fa0d17cfda40f Mon Sep 17 00:00:00 2001 From: Likun Gao Date: Wed, 26 Apr 2023 15:06:18 +0800 Subject: drm/amd/swsmu: support SMU_14_0_2 ppt_funcs Add smu v14_0_2 ppt funcs support. v2: squash in updates (Alex) Signed-off-by: Likun Gao Reviewed-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index dc2a864b0f51..7789b313285c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -45,6 +45,7 @@ #include "smu_v13_0_6_ppt.h" #include "smu_v13_0_7_ppt.h" #include "smu_v14_0_0_ppt.h" +#include "smu_v14_0_2_ppt.h" #include "amd_pcie.h" /* @@ -715,6 +716,10 @@ static int smu_set_funcs(struct amdgpu_device *adev) case IP_VERSION(14, 0, 1): smu_v14_0_0_set_ppt_funcs(smu); break; + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): + smu_v14_0_2_set_ppt_funcs(smu); + break; default: return -EINVAL; } -- cgit v1.2.3-70-g09d2 From 0c1195ca0d02a3db2599738a944bb6a36f6fa234 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Wed, 31 May 2023 16:09:05 +0800 Subject: drm/amd/swsmu: support smu block discovery for smu v14 Add smu ip block support for SMU v14. Signed-off-by: Kenneth Feng Reviewed-by: Likun Gao Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 90735e966318..0e31bdb4b7cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1901,6 +1901,8 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) break; case IP_VERSION(14, 0, 0): case IP_VERSION(14, 0, 1): + case IP_VERSION(14, 0, 2): + case IP_VERSION(14, 0, 3): amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block); break; default: -- cgit v1.2.3-70-g09d2 From ca7c4507ba87e9fc22e0ecfa819c3664b3e8287b Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 15 Mar 2024 13:07:53 +0100 Subject: drm/amdgpu: remove invalid resource->start check v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The majority of those were removed in the commit aed01a68047b ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2") But this one was missed because it's working on the resource and not the BO. Since we also no longer use a fake start address for visible BOs this will now trigger invalid mapping errors.
v2: also remove the unused variable Signed-off-by: Christian König Fixes: aed01a68047b ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2") CC: stable@vger.kernel.org Acked-by: Pierre-Eric Pelloux-Prayer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fc418e670fda..6417cb76ccd4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -557,7 +557,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - size_t bus_size = (size_t)mem->size; switch (mem->mem_type) { case TTM_PL_SYSTEM: @@ -568,9 +567,6 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; - /* check if it's visible */ - if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) - return -EINVAL; if (adev->mman.aper_base_kaddr && mem->placement & TTM_PL_FLAG_CONTIGUOUS) -- cgit v1.2.3-70-g09d2 From 18921b205012568b45760753ad3146ddb9e2d4e2 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 10 Apr 2024 15:52:10 -0400 Subject: drm/amdkfd: Fix memory leak in create_process failure Fix memory leak due to a leaked mmget reference on an error handling code path that is triggered when attempting to create KFD processes while a GPU reset is in progress. Fixes: 0ab2d7532b05 ("drm/amdkfd: prepare per-process debug enable and disable") CC: Xiaogang Chen Signed-off-by: Felix Kuehling Tested-by: Harish Kasiviswanthan Reviewed-by: Mukul Joshi Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 717a60d7a4ea..b79986412cd8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -819,9 +819,9 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) mutex_lock(&kfd_processes_mutex); if (kfd_is_locked()) { - mutex_unlock(&kfd_processes_mutex); pr_debug("KFD is locked! Cannot create process"); - return ERR_PTR(-EINVAL); + process = ERR_PTR(-EINVAL); + goto out; } /* A prior open of /dev/kfd could have already created the process. */ -- cgit v1.2.3-70-g09d2 From 91f10a3d21f2313485178d49efef8a3ba02bd8c7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 29 Mar 2024 18:03:03 -0400 Subject: Revert "drm/amd/display: fix USB-C flag update after enc10 feature init" This reverts commit b5abd7f983e14054593dc91d6df2aa5f8cc67652. This change breaks DSC on 4k monitors at 144Hz over USB-C. 
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3254 Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher Cc: Muhammad Ahmed Cc: Tom Chung Cc: Charlene Liu Cc: Hamza Mahfooz Cc: Harry Wentland Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 8 +++----- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4 ++-- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c index e224a028d68a..8a0460e86309 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c @@ -248,14 +248,12 @@ void dcn32_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; - enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; - - enc10->base.features = *enc_features; if (enc10->base.connector.id == CONNECTOR_ID_USBC) enc10->base.features.flags.bits.DP_IS_USB_C = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; enc10->base.transmitter = init_data->transmitter; diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c index 81e349d5835b..da94e5309fba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c @@ -184,6 +184,8 @@ void dcn35_link_encoder_construct( enc10->base.hpd_source = init_data->hpd_source; enc10->base.connector = init_data->connector; + if (enc10->base.connector.id == CONNECTOR_ID_USBC) + enc10->base.features.flags.bits.DP_IS_USB_C = 1; enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; @@ -238,8 +240,6 @@ void dcn35_link_encoder_construct( } enc10->base.features.flags.bits.HDMI_6GB_EN = 1; - if (enc10->base.connector.id == CONNECTOR_ID_USBC) - enc10->base.features.flags.bits.DP_IS_USB_C = 1; if (bp_funcs->get_connector_speed_cap_info) result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios, -- cgit v1.2.3-70-g09d2 From 6fef2d4c00b5b8561ad68dd2b68173f5c6af1e75 Mon Sep 17 00:00:00 2001 From: xinhui pan Date: Thu, 11 Apr 2024 11:11:38 +0800 Subject: drm/amdgpu: validate the parameters of bo mapping operations more clearly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Verify the parameters of amdgpu_vm_bo_(map/replace_map/clearing_mappings) in one common place. 
Fixes: dc54d3d1744d ("drm/amdgpu: implement AMDGPU_VA_OP_CLEAR v2") Cc: stable@vger.kernel.org Reported-by: Vlad Stolyarov Suggested-by: Christian König Signed-off-by: xinhui pan Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 72 ++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4299ce386322..94089069c9ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1613,6 +1613,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, trace_amdgpu_vm_bo_map(bo_va, mapping); } +/* Validate operation parameters to prevent potential abuse */ +static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev, + struct amdgpu_bo *bo, + uint64_t saddr, + uint64_t offset, + uint64_t size) +{ + uint64_t tmp, lpfn; + + if (saddr & AMDGPU_GPU_PAGE_MASK + || offset & AMDGPU_GPU_PAGE_MASK + || size & AMDGPU_GPU_PAGE_MASK) + return -EINVAL; + + if (check_add_overflow(saddr, size, &tmp) + || check_add_overflow(offset, size, &tmp) + || size == 0 /* which also leads to end < begin */) + return -EINVAL; + + /* make sure object fit at this offset */ + if (bo && offset + size > amdgpu_bo_size(bo)) + return -EINVAL; + + /* Ensure last pfn not exceed max_pfn */ + lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT; + if (lpfn >= adev->vm_manager.max_pfn) + return -EINVAL; + + return 0; +} + /** * amdgpu_vm_bo_map - map bo inside a vm * @@ -1639,21 +1670,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; uint64_t eaddr; + int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr); if (tmp) { @@ -1706,17 +1730,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, uint64_t eaddr; int r; - /* validate the parameters */ - if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK || size & ~PAGE_MASK) - return -EINVAL; - if (saddr + size <= saddr || offset + size <= offset) - return -EINVAL; - - /* make sure object fit at this offset */ - eaddr = saddr + size - 1; - if ((bo && offset + size > amdgpu_bo_size(bo)) || - (eaddr >= adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT)) - return -EINVAL; + r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size); + if (r) + return r; /* Allocate all the needed memory */ mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); @@ -1730,7 +1746,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, } saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; mapping->start = saddr; mapping->last = eaddr; @@ -1817,10 +1833,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *before, *after, *tmp, *next; LIST_HEAD(removed); uint64_t 
eaddr; + int r; + + r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size); + if (r) + return r; - eaddr = saddr + size - 1; saddr /= AMDGPU_GPU_PAGE_SIZE; - eaddr /= AMDGPU_GPU_PAGE_SIZE; + eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE; /* Allocate all the needed memory */ before = kzalloc(sizeof(*before), GFP_KERNEL); -- cgit v1.2.3-70-g09d2 From a6ff969fe9cbf369e3cd0ac54261fec1122682ec Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 4 Apr 2024 16:25:40 +0200 Subject: drm/amdgpu: fix visible VRAM handling during faults MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When we removed the hacky start code check we actually didn't take into account that *all* VRAM pages need to be CPU accessible. Clean up the code and unify the handling into a single helper which checks if the whole resource is CPU accessible. The only place where a partial check would make sense is during eviction, but that is negligible. Signed-off-by: Christian König Fixes: aed01a68047b ("drm/amdgpu: Remove TTM resource->start visible VRAM condition v2") Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher CC: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 22 +++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 22 ----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 61 +++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 3 ++ 5 files changed, 53 insertions(+), 57 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 0a4b09709cfb..ec888fc6ead8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -819,7 +819,7 @@ retry: p->bytes_moved += ctx.bytes_moved; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) p->bytes_moved_vis += ctx.bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 010b0cb7693c..2099159a693f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -617,8 +617,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, return r; if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && - bo->tbo.resource->mem_type == TTM_PL_VRAM && - amdgpu_bo_in_cpu_visible_vram(bo)) + amdgpu_res_cpu_visible(adev, bo->tbo.resource)) amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, ctx.bytes_moved); else @@ -1272,23 +1271,25 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict) void amdgpu_bo_get_memory(struct amdgpu_bo *bo, struct amdgpu_mem_stats *stats) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct ttm_resource *res = bo->tbo.resource; uint64_t size = amdgpu_bo_size(bo); struct drm_gem_object *obj; unsigned int domain; bool shared; /* Abort if the BO doesn't currently have a backing store */ - if (!bo->tbo.resource) + if (!res) return; obj = &bo->tbo.base; shared = drm_gem_object_is_shared_for_memory_stats(obj); - domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); + domain = amdgpu_mem_type_to_domain(res->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: stats->vram += size; - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) stats->visible_vram += size; if (shared)
stats->vram_shared += size; @@ -1389,10 +1390,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* Remember that this BO was accessed by the CPU */ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - if (bo->resource->mem_type != TTM_PL_VRAM) - return 0; - - if (amdgpu_bo_in_cpu_visible_vram(abo)) + if (amdgpu_res_cpu_visible(adev, bo->resource)) return 0; /* Can't move a pinned BO to visible VRAM */ @@ -1415,7 +1413,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* this should never happen */ if (bo->resource->mem_type == TTM_PL_VRAM && - !amdgpu_bo_in_cpu_visible_vram(abo)) + !amdgpu_res_cpu_visible(adev, bo->resource)) return VM_FAULT_SIGBUS; ttm_bo_move_to_lru_tail_unlocked(bo); @@ -1579,6 +1577,7 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, */ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct dma_buf_attachment *attachment; struct dma_buf *dma_buf; const char *placement; @@ -1587,10 +1586,11 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m) if (dma_resv_trylock(bo->tbo.base.resv)) { unsigned int domain; + domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type); switch (domain) { case AMDGPU_GEM_DOMAIN_VRAM: - if (amdgpu_bo_in_cpu_visible_vram(bo)) + if (amdgpu_res_cpu_visible(adev, bo->tbo.resource)) placement = "VRAM VISIBLE"; else placement = "VRAM"; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index be679c42b0b8..fa03d9e4874c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -250,28 +250,6 @@ static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); } -/** - * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM - */ -static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) -{ - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct amdgpu_res_cursor cursor; - - if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM) - return false; - - amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor); - while (cursor.remaining) { - if (cursor.start < adev->gmc.visible_vram_size) - return true; - - amdgpu_res_next(&cursor, cursor.size); - } - - return false; -} - /** * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 6417cb76ccd4..1d71729e3f6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -133,7 +133,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) && !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && - amdgpu_bo_in_cpu_visible_vram(abo)) { + amdgpu_res_cpu_visible(adev, bo->resource)) { /* Try evicting to the CPU inaccessible part of VRAM * first, but only set GTT as busy placement, so this @@ -403,40 +403,55 @@ error: return r; } -/* - * amdgpu_mem_visible - Check that memory can be accessed by ttm_bo_move_memcpy +/** + * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU + * @adev: amdgpu device + * @res: the resource to check * - * Called by amdgpu_bo_move() + * Returns: true if the full resource is CPU visible, false otherwise. 
*/ -static bool amdgpu_mem_visible(struct amdgpu_device *adev, - struct ttm_resource *mem) +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res) { - u64 mem_size = (u64)mem->size; struct amdgpu_res_cursor cursor; - u64 end; - if (mem->mem_type == TTM_PL_SYSTEM || - mem->mem_type == TTM_PL_TT) + if (!res) + return false; + + if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || + res->mem_type == AMDGPU_PL_PREEMPT) return true; - if (mem->mem_type != TTM_PL_VRAM) + + if (res->mem_type != TTM_PL_VRAM) return false; - amdgpu_res_first(mem, 0, mem_size, &cursor); - end = cursor.start + cursor.size; + amdgpu_res_first(res, 0, res->size, &cursor); while (cursor.remaining) { + if ((cursor.start + cursor.size) >= adev->gmc.visible_vram_size) + return false; amdgpu_res_next(&cursor, cursor.size); + } - if (!cursor.remaining) - break; + return true; +} - /* ttm_resource_ioremap only supports contiguous memory */ - if (end != cursor.start) - return false; +/* + * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy + * + * Called by amdgpu_bo_move() + */ +static bool amdgpu_res_copyable(struct amdgpu_device *adev, + struct ttm_resource *mem) +{ + if (!amdgpu_res_cpu_visible(adev, mem)) + return false; - end = cursor.start + cursor.size; - } + /* ttm_resource_ioremap only supports contiguous memory */ + if (mem->mem_type == TTM_PL_VRAM && + !(mem->placement & TTM_PL_FLAG_CONTIGUOUS)) + return false; - return end <= adev->gmc.visible_vram_size; + return true; } /* @@ -529,8 +544,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (r) { /* Check that all memory is CPU accessible */ - if (!amdgpu_mem_visible(adev, old_mem) || - !amdgpu_mem_visible(adev, new_mem)) { + if (!amdgpu_res_copyable(adev, old_mem) || + !amdgpu_res_copyable(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 65ec82141a8e..32cf6b6f6efd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -139,6 +139,9 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, + struct ttm_resource *res); + int amdgpu_ttm_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, -- cgit v1.2.3-70-g09d2 From eefc85a2779d75909e769feb7dd056a0bfba4ca7 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 12 Apr 2024 14:19:36 +0530 Subject: drm:amdgpu: enable IH RB ring1 for IH v6.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need IH ring1 for handling the pagefault interrupts which are overflowing the default ring for specific usecases. 
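The overflow problem the ring1 patches address is easy to model: an IH ring has a fixed number of slots, so a storm of page faults on the shared default ring can also push out unrelated interrupts. The following is a rough user-space sketch (toy sizes and source IDs, not driver code) of why giving the noisy client its own ring helps; the actual ring allocation is in the diff that follows.

#include <stdio.h>

#define RING_SIZE 8

struct toy_ring {
	int entries[RING_SIZE];
	int count;	/* occupied slots */
	int dropped;	/* writes lost to overflow */
};

/* Producer side: a full ring simply drops the new entry. */
static void ring_push(struct toy_ring *r, int src)
{
	if (r->count == RING_SIZE)
		r->dropped++;
	else
		r->entries[r->count++] = src;
}

int main(void)
{
	struct toy_ring shared = { 0 }, ring0 = { 0 }, ring1 = { 0 };
	int i;

	/* Shared ring: 100 page faults (src 1) arrive before one other irq (src 2). */
	for (i = 0; i < 100; i++)
		ring_push(&shared, 1);
	ring_push(&shared, 2);		/* lost, the ring is already full */

	/* Split rings: faults go to ring1, everything else stays on ring0. */
	for (i = 0; i < 100; i++)
		ring_push(&ring1, 1);
	ring_push(&ring0, 2);		/* still delivered */

	printf("shared ring dropped %d entries\n", shared.dropped);
	printf("split rings dropped %d (ring0) + %d (ring1)\n",
	       ring0.dropped, ring1.dropped);
	return 0;
}

A real IH ring is of course drained by the handler, but the point carries over: with ring1 dedicated to the page-fault clients, a fault storm can no longer evict other clients' interrupts from the default ring.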
Signed-off-by: Sunil Khatri Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index ad4ad39f128f..26dc99232eb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -549,8 +549,15 @@ static int ih_v6_0_sw_init(void *handle) adev->irq.ih.use_doorbell = true; adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1; - adev->irq.ih1.ring_size = 0; - adev->irq.ih2.ring_size = 0; + if (!(adev->flags & AMD_IS_APU)) { + r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, IH_RING_SIZE, + use_bus_addr); + if (r) + return r; + + adev->irq.ih1.use_doorbell = true; + adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1; + } /* initialize ih control register offset */ ih_v6_0_init_register_offset(adev); -- cgit v1.2.3-70-g09d2 From 5adcd78fa2bcc458f9786067bcf4a15f9a3f49c9 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 12 Apr 2024 14:22:16 +0530 Subject: drm:amdgpu: enable IH ring1 for IH v6.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need IH ring1 for handling the pagefault interrupts which overflow the default ring for specific usecases.
Signed-off-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 27 ++++++++----------------- 1 file changed, 8 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index c368c70df3f4..c3beb872adf8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -144,7 +144,7 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { enum amdgpu_ras_block block = 0; - int old_poison, ret = -EINVAL; + int old_poison; uint32_t reset = 0; struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); @@ -163,17 +163,13 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, case SOC15_IH_CLIENTID_SE2SH: case SOC15_IH_CLIENTID_SE3SH: case SOC15_IH_CLIENTID_UTCL2: - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__GFX; - if (ret) - reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; + reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; case SOC15_IH_CLIENTID_VMC: case SOC15_IH_CLIENTID_VMC1: - ret = kfd_dqm_evict_pasid(dev->dqm, pasid); block = AMDGPU_RAS_BLOCK__MMHUB; - if (ret) - reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; + reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET; break; case SOC15_IH_CLIENTID_SDMA0: case SOC15_IH_CLIENTID_SDMA1: @@ -184,22 +180,15 @@ static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET; break; default: - break; + dev_warn(dev->adev->dev, + "client %d does not support poison consumption\n", client_id); + return; } kfd_signal_poison_consumed_event(dev, pasid); - /* resetting queue passes, do page retirement without gpu reset - * resetting queue fails, fallback to gpu reset solution - */ - if (!ret) - dev_warn(dev->adev->dev, - "RAS poison consumption, unmap queue flow succeeded: client id %d\n", - client_id); - else - dev_warn(dev->adev->dev, - "RAS poison consumption, fall back to gpu reset flow: client id %d\n", - client_id); + dev_warn(dev->adev->dev, + "poison is consumed by client %d, kick off gpu reset flow\n", client_id); amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, reset); } -- cgit v1.2.3-70-g09d2 From cba9b630f087005a2c9c201e16b5dbf91d51d3c0 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 12 Apr 2024 14:47:30 +0530 Subject: drm/amdgpu: add IH_RING1_CFG headers for IH v6.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add offsets, mask and shift macros for IH v6.0 which are needed to configure ring1 client irq redirection. 
Signed-off-by: Sunil Khatri Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h | 4 ++++ .../gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h | 10 ++++++++++ 2 files changed, 14 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h index 8b931bbabe70..969e006b859b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h @@ -237,6 +237,10 @@ #define regSEM_REGISTER_LAST_PART2_BASE_IDX 0 #define regIH_CLIENT_CFG 0x0184 #define regIH_CLIENT_CFG_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_INDEX 0x0185 +#define regIH_RING1_CLIENT_CFG_INDEX_BASE_IDX 0 +#define regIH_RING1_CLIENT_CFG_DATA 0x0186 +#define regIH_RING1_CLIENT_CFG_DATA_BASE_IDX 0 #define regIH_CLIENT_CFG_INDEX 0x0188 #define regIH_CLIENT_CFG_INDEX_BASE_IDX 0 #define regIH_CLIENT_CFG_DATA 0x0189 diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h index f262f44fa68c..a672a91e58f0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h @@ -888,6 +888,16 @@ //IH_CLIENT_CFG #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM__SHIFT 0x0 #define IH_CLIENT_CFG__TOTAL_CLIENT_NUM_MASK 0x0000003FL +//IH_RING1_CLIENT_CFG_INDEX +#define IH_RING1_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_INDEX__INDEX_MASK 0x00000007L +//IH_RING1_CLIENT_CFG_DATA +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID__SHIFT 0x0 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID__SHIFT 0x8 +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE__SHIFT 0x10 +#define IH_RING1_CLIENT_CFG_DATA__CLIENT_ID_MASK 0x000000FFL +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MASK 0x0000FF00L +#define IH_RING1_CLIENT_CFG_DATA__SOURCE_ID_MATCH_ENABLE_MASK 0x00010000L //IH_CLIENT_CFG_INDEX #define IH_CLIENT_CFG_INDEX__INDEX__SHIFT 0x0 #define IH_CLIENT_CFG_INDEX__INDEX_MASK 0x0000001FL -- cgit v1.2.3-70-g09d2 From ca0afa2f4161dbf82e345144fd0042d00b11eb5b Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 12 Apr 2024 15:00:20 +0530 Subject: drm/amdgpu: enable redirection of irq's for IH V6.0 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable redirection of irq for pagefaults for specific clients to avoid overflow without dropping interrupts. So here we redirect the interrupts to another IH ring i.e ring1 where only these interrupts are processed. 
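These SHIFT/MASK pairs are consumed by the driver's read-modify-write field helpers. As a quick stand-alone illustration (user-space C, field names shortened, values taken from the definitions above and from the redirection patch later in this series), the three fields combine into the 32-bit CFG_DATA value like this:

#include <stdio.h>
#include <stdint.h>

/* Shortened copies of the IH_RING1_CLIENT_CFG_DATA field definitions above. */
#define CLIENT_ID__SHIFT               0x0
#define CLIENT_ID_MASK                 0x000000FFL
#define SOURCE_ID__SHIFT               0x8
#define SOURCE_ID_MASK                 0x0000FF00L
#define SOURCE_ID_MATCH_ENABLE__SHIFT  0x10
#define SOURCE_ID_MATCH_ENABLE_MASK    0x00010000L

/* Minimal stand-in for the driver's REG_SET_FIELD() helper. */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t data = 0;

	/* Values programmed for ring1 redirection: client 0xa, source 0x0. */
	data = set_field(data, CLIENT_ID_MASK, CLIENT_ID__SHIFT, 0xa);
	data = set_field(data, SOURCE_ID_MASK, SOURCE_ID__SHIFT, 0x0);
	data = set_field(data, SOURCE_ID_MATCH_ENABLE_MASK,
			 SOURCE_ID_MATCH_ENABLE__SHIFT, 0x1);

	printf("IH_RING1_CLIENT_CFG_DATA = 0x%08x\n", data);	/* 0x0001000a */
	return 0;
}

So client 0xa with source-ID matching enabled packs to 0x0001000a, which is what the later ih_v6_0_irq_init() change composes via REG_SET_FIELD().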
Reviewed-by: Christian König Signed-off-by: Sunil Khatri Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c index 26dc99232eb6..c757ef99e3c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c @@ -346,6 +346,21 @@ static int ih_v6_0_irq_init(struct amdgpu_device *adev) DELAY, 3); WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + /* Redirect the interrupts to IH RB1 for dGPU */ + if (adev->irq.ih1.ring_size) { + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, + SOURCE_ID_MATCH_ENABLE, 0x1); + + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); + } + pci_set_master(adev->pdev); /* enable interrupts */ -- cgit v1.2.3-70-g09d2 From ea137071ada1591a05ce0366de350158bf8dd6c7 Mon Sep 17 00:00:00 2001 From: Ahmad Rehman Date: Tue, 16 Apr 2024 13:29:15 -0500 Subject: drm/amdgpu: Skip the coredump collection on reset during driver reload In passthrough environment, the driver triggers the mode-1 reset on reload. The reset causes the core dump collection which is delayed task and prevents driver from unloading until it is completed. Since we do not need to collect data on "reset on reload" case, we can skip core dump collection. v2: Use the same flag to avoid calling amdgpu_reset_reg_dumps as well. 
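One detail worth spelling out: IH_RING1_CLIENT_CFG_INDEX and _DATA form the classic index/data indirect-register pair, where INDEX (a 3-bit field, so up to eight slots) selects which ring1 client entry the subsequent DATA access touches. A toy user-space model of that idiom, with made-up backing storage rather than real MMIO, looks like this:

#include <stdio.h>
#include <stdint.h>

#define NUM_SLOTS 8

/* Toy register file: one index register in front of NUM_SLOTS data slots. */
struct indexed_regs {
	uint32_t index;
	uint32_t slot[NUM_SLOTS];
};

static void write_index(struct indexed_regs *r, uint32_t idx)
{
	r->index = idx & (NUM_SLOTS - 1);	/* INDEX field is 3 bits wide */
}

static void write_data(struct indexed_regs *r, uint32_t val)
{
	r->slot[r->index] = val;		/* lands in the selected slot */
}

static uint32_t read_data(struct indexed_regs *r)
{
	return r->slot[r->index];
}

int main(void)
{
	struct indexed_regs ih = { 0 };

	write_index(&ih, 0);		/* select ring1 client slot 0 */
	write_data(&ih, 0x0001000a);	/* CLIENT_ID 0xa, source-ID match enabled */

	printf("slot 0 = 0x%08x\n", read_data(&ih));
	return 0;
}

The hunk above follows the same sequence through RREG32/WREG32_SOC15(): select slot 0, then update the client configuration for that slot.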
Signed-off-by: Ahmad Rehman Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 7 +++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h | 1 + 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1b2e177bc2d6..f3b7cb18fd46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5357,7 +5357,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, /* Try reset handler method first */ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, reset_list); - amdgpu_reset_reg_dumps(tmp_adev); + + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) + amdgpu_reset_reg_dumps(tmp_adev); reset_context->reset_device_list = device_list_handle; r = amdgpu_reset_perform_reset(tmp_adev, reset_context); @@ -5430,7 +5432,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, vram_lost = amdgpu_device_check_vram_lost(tmp_adev); - amdgpu_coredump(tmp_adev, vram_lost, reset_context); + if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) + amdgpu_coredump(tmp_adev, vram_lost, reset_context); if (vram_lost) { DRM_INFO("VRAM is lost due to GPU reset!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 6ea893ad9a36..c512f70b8272 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2481,6 +2481,7 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work) /* Use a common context, just need to make sure full reset is done */ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); + set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags); r = amdgpu_do_asic_reset(&device_list, &reset_context); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index 66125d43cf21..b11d190ece53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -32,6 +32,7 @@ enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, + AMDGPU_SKIP_COREDUMP = 2, }; struct amdgpu_reset_context { -- cgit v1.2.3-70-g09d2 From 93522c19488edc1b347083cd3622a1572d5a95e1 Mon Sep 17 00:00:00 2001 From: Sunil Khatri Date: Fri, 12 Apr 2024 15:05:00 +0530 Subject: drm/amdgpu: enable redirection of irq's for IH V6.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Enable redirection of irq for pagefaults for specific clients to avoid overflow without dropping interrupts. So here we redirect the interrupts to another IH ring i.e ring1 where only these interrupts are processed. 
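The mechanism is just a flag bit in the shared reset context that the dump paths test before doing optional work. A minimal user-space sketch of that pattern (the bit numbers mirror the enum added in the diff below; the prints are stand-ins for the real register dump and coredump steps) is:

#include <stdio.h>

/* Mirrors enum AMDGPU_RESET_FLAGS from the amdgpu_reset.h hunk below. */
enum reset_flags {
	NEED_FULL_RESET = 0,
	SKIP_HW_RESET   = 1,
	SKIP_COREDUMP   = 2,
};

#define FLAG(bit)	(1UL << (bit))

static void do_asic_reset(unsigned long flags)
{
	if (!(flags & FLAG(SKIP_COREDUMP))) {
		printf("dumping registers before reset\n");
		printf("collecting coredump after reset\n");
	} else {
		printf("skipping reg dump and coredump\n");
	}
}

int main(void)
{
	do_asic_reset(0);			/* normal GPU hang: collect everything */
	do_asic_reset(FLAG(SKIP_COREDUMP));	/* reset on driver reload: skip it */
	return 0;
}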
Signed-off-by: Sunil Khatri Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/ih_v6_1.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c index 73dba180fabd..29ed78798070 100644 --- a/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_1.c @@ -346,6 +346,21 @@ static int ih_v6_1_irq_init(struct amdgpu_device *adev) DELAY, 3); WREG32_SOC15(OSSSYS, 0, regIH_MSI_STORM_CTRL, tmp); + /* Redirect the interrupts to IH RB1 for dGPU */ + if (adev->irq.ih1.ring_size) { + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0); + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp); + + tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0); + tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, + SOURCE_ID_MATCH_ENABLE, 0x1); + + WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp); + } + pci_set_master(adev->pdev); /* enable interrupts */ -- cgit v1.2.3-70-g09d2 From 6e7a4176247999b2b30116fb7b41e87f9327dd3d Mon Sep 17 00:00:00 2001 From: Joshua Ashton Date: Thu, 2 Nov 2023 04:21:55 +0000 Subject: drm/amd/display: Set color_mgmt_changed to true on unsuspend Otherwise we can end up with a frame on unsuspend where color management is not applied when userspace has not committed themselves. Fixes re-applying color management on Steam Deck/Gamescope on S3 resume. Signed-off-by: Joshua Ashton Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 75c78f6e2fde..0d67fc56249b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3046,6 +3046,7 @@ static int dm_resume(void *handle) dc_stream_release(dm_new_crtc_state->stream); dm_new_crtc_state->stream = NULL; } + dm_new_crtc_state->base.color_mgmt_changed = true; } for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { -- cgit v1.2.3-70-g09d2 From 8954c3fbe764a42db29bc8d54ff795a105759fe9 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 29 Feb 2024 13:59:19 +0530 Subject: drm/amdgpu: Change AID detection logic On GFX 9.4.3 SOCs, only 2 SDMA instances need to be available to be considered as a valid AID. 
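The relaxed rule is easiest to check with concrete masks. Below is a small user-space replica of the accumulation loop in the diff that follows, assuming 4 SDMA instances per AID as the code's own comment states: an AID now counts as valid when all four instances are present, or when either the lower pair (0x3) or the upper pair (0xc) is.

#include <stdio.h>
#include <stdint.h>

#define NUM_INST_PER_AID 4	/* 4 SDMA instances per AID on GFX 9.4.3 */

/* Mirrors the accumulation loop in aqua_vanjaram_init_soc_config(). */
static uint32_t aid_mask_from_sdma_mask(uint32_t inst_mask)
{
	uint32_t mask = (1 << NUM_INST_PER_AID) - 1;	/* 0xf */
	uint32_t aid_mask = 0, avail_inst;
	int i;

	for (i = 0; inst_mask; inst_mask >>= NUM_INST_PER_AID, ++i) {
		avail_inst = inst_mask & mask;
		if (avail_inst == mask || avail_inst == 0x3 || avail_inst == 0xc)
			aid_mask |= (1 << i);
	}
	return aid_mask;
}

int main(void)
{
	/* AID0 has all 4 instances, AID1 only the upper pair: both valid now. */
	printf("sdma_mask 0xcf -> aid_mask 0x%x\n", aid_mask_from_sdma_mask(0xcf));

	/* AID0 has a single instance: still rejected. */
	printf("sdma_mask 0x01 -> aid_mask 0x%x\n", aid_mask_from_sdma_mask(0x01));
	return 0;
}

With the previous all-instances check, only the fully populated AID in the 0xcf example would have been accepted; the pair-only AID would have been dropped.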
Signed-off-by: Lijo Lazar Reviewed-by: Asad Kamal Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index fbb43ae7624f..414ea3f560a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -630,7 +630,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) { - u32 mask, inst_mask = adev->sdma.sdma_mask; + u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask; int ret, i; /* generally 1 AID supports 4 instances */ @@ -642,7 +642,9 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask; inst_mask >>= adev->sdma.num_inst_per_aid, ++i) { - if ((inst_mask & mask) == mask) + avail_inst = inst_mask & mask; + if (avail_inst == mask || avail_inst == 0x3 || + avail_inst == 0xc) adev->aid_mask |= (1 << i); } -- cgit v1.2.3-70-g09d2 From 6a009ca1bf94dfe194d6e9cc85a9319b483aac2d Mon Sep 17 00:00:00 2001 From: Zhigang Luo Date: Wed, 17 Apr 2024 10:59:58 -0400 Subject: drm/amdgpu: remove virt_init_data_exchange from poison consumption handler Host will initiate an FLR for all poison consumption. Guest should wait for FLR message to re-init data exchange. Signed-off-by: Zhigang Luo Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 89992c1c9a62..aba00d961627 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -446,8 +446,6 @@ static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev, amdgpu_virt_fini_data_exchange(adev); xgpu_nv_send_access_requests_with_param(adev, IDH_RAS_POISON, block, 0, 0); - if (block != AMDGPU_RAS_BLOCK__SDMA) - amdgpu_virt_init_data_exchange(adev); } } -- cgit v1.2.3-70-g09d2 From 7e38ccb5276fe28f60088181ff4d279a8277b708 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 17 Apr 2024 21:13:59 -0400 Subject: drm/amdkfd: Fix eviction fence handling Handle case that dma_fence_get_rcu_safe returns NULL. If restore work is already scheduled, only update its timer. The same work item cannot be queued twice, so undo the extra queue eviction. 
Fixes: 9a1c1339abf9 ("drm/amdkfd: Run restore_workers on freezable WQs") Signed-off-by: Felix Kuehling Reviewed-by: Philip Yang Tested-by: Gang BA Reviewed-by: Gang BA Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index b79986412cd8..aafdf064651f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p) rcu_read_lock(); ef = dma_fence_get_rcu_safe(&p->ef); rcu_read_unlock(); + if (!ef) + return -EINVAL; ret = dma_fence_signal(ef); dma_fence_put(ef); @@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work) * they are responsible stopping the queues and scheduling * the restore work. */ - if (!signal_eviction_fence(p)) - queue_delayed_work(kfd_restore_wq, &p->restore_work, - msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); - else + if (signal_eviction_fence(p) || + mod_delayed_work(kfd_restore_wq, &p->restore_work, + msecs_to_jiffies(PROCESS_RESTORE_TIME_MS))) kfd_process_restore_queues(p); pr_debug("Finished evicting pasid 0x%x\n", p->pasid); -- cgit v1.2.3-70-g09d2 From e53a1713de314204f66cc186a74115ea62407876 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Thu, 18 Apr 2024 11:32:34 -0400 Subject: drm/amdgpu: Fix leak when GPU memory allocation fails Free the sync object if the memory allocation fails for any reason. Signed-off-by: Mukul Joshi Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index df58a6a1a67e..7c23ba19af33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1854,6 +1854,7 @@ err_node_allow: err_bo_create: amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); err_reserve_limit: + amdgpu_sync_free(&(*mem)->sync); mutex_destroy(&(*mem)->lock); if (gobj) drm_gem_object_put(gobj); -- cgit v1.2.3-70-g09d2 From 81bf14519a8ca17af4f057a125d87fabbae90af3 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Sun, 7 Apr 2024 12:36:00 +0800 Subject: drm/amdkfd: make sure VM is ready for updating operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When page table BOs were evicted but not validated before updating page tables, VM is still in evicting state, amdgpu_vm_update_range returns -EBUSY and restore_process_worker runs into a dead loop. v2: Split the BO validation and page table update into two separate loops in amdgpu_amdkfd_restore_process_bos. 
(Felix) 1.Validate BOs 2.Validate VM (and DMABuf attachments) 3.Update page tables for the BOs validated above Fixes: 50661eb1a2c8 ("drm/amdgpu: Auto-validate DMABuf imports in compute VMs") Signed-off-by: Lang Yu Acked-by: Christian König Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 34 ++++++++++++++---------- 1 file changed, 20 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7c23ba19af33..2131de36e3da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -2901,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * amdgpu_sync_create(&sync_obj); - /* Validate BOs and map them to GPUVM (update VM page tables). */ + /* Validate BOs managed by KFD */ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) { struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; - struct kfd_mem_attachment *attachment; struct dma_resv_iter cursor; struct dma_fence *fence; @@ -2932,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * goto validate_map_fail; } } + } + + if (failed_size) + pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); + + /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO + * validations above would invalidate DMABuf imports again. + */ + ret = process_validate_vms(process_info, &exec.ticket); + if (ret) { + pr_debug("Validating VMs failed, ret: %d\n", ret); + goto validate_map_fail; + } + + /* Update mappings managed by KFD. */ + list_for_each_entry(mem, &process_info->kfd_bo_list, + validate_list) { + struct kfd_mem_attachment *attachment; + list_for_each_entry(attachment, &mem->attachments, list) { if (!attachment->is_mapped) continue; @@ -2948,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu * } } - if (failed_size) - pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size); - - /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO - * validations above would invalidate DMABuf imports again. - */ - ret = process_validate_vms(process_info, &exec.ticket); - if (ret) { - pr_debug("Validating VMs failed, ret: %d\n", ret); - goto validate_map_fail; - } - /* Update mappings not managed by KFD */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { -- cgit v1.2.3-70-g09d2