Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
34 files changed, 678 insertions, 253 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 95aca9b0ef7f..34fc36e77595 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -60,7 +60,7 @@ include $(AMD_DC) DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \ dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \ -dc_link_enc_cfg.o +dc_link_enc_cfg.o dc_link_dpcd.o ifdef CONFIG_DRM_AMD_DC_DCN DISPLAY_CORE += dc_vm_helper.o diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 59d17195bc22..9d1db74de36d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -123,7 +123,7 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, } } -void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr) +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, struct dc_state *context) { int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz; @@ -132,6 +132,68 @@ void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr) uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider); + uint32_t current_dispclk_wdivider; + uint32_t i; + + REG_GET(DENTIST_DISPCLK_CNTL, + DENTIST_DISPCLK_WDIVIDER, ¤t_dispclk_wdivider); + + /* When changing divider to or from 127, some extra programming is required to prevent corruption */ + if (current_dispclk_wdivider == 127 && dispclk_wdivider != 127) { + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + uint32_t fifo_level; + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + int32_t N; + int32_t j; + + if (!pipe_ctx->stream) + continue; + /* Virtual encoders don't have this function */ + if (!stream_enc->funcs->get_fifo_cal_average_level) + continue; + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( + stream_enc); + N = fifo_level / 4; + dccg->funcs->set_fifo_errdet_ovr_en( + dccg, + true); + for (j = 0; j < N - 4; j++) + dccg->funcs->otg_drop_pixel( + dccg, + pipe_ctx->stream_res.tg->inst); + dccg->funcs->set_fifo_errdet_ovr_en( + dccg, + false); + } + } else if (dispclk_wdivider == 127 && current_dispclk_wdivider != 127) { + REG_UPDATE(DENTIST_DISPCLK_CNTL, + DENTIST_DISPCLK_WDIVIDER, 126); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 100); + for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg; + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + uint32_t fifo_level; + int32_t N; + int32_t j; + + if (!pipe_ctx->stream) + continue; + /* Virtual encoders don't have this function */ + if (!stream_enc->funcs->get_fifo_cal_average_level) + continue; + fifo_level = stream_enc->funcs->get_fifo_cal_average_level( + stream_enc); + N = fifo_level / 4; + dccg->funcs->set_fifo_errdet_ovr_en(dccg, true); + for (j = 0; j < 12 - N; j++) + dccg->funcs->otg_add_pixel(dccg, + pipe_ctx->stream_res.tg->inst); + 
dccg->funcs->set_fifo_errdet_ovr_en(dccg, false); + } + } REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider); @@ -251,11 +313,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (dpp_clock_lowered) { // if clock is being lowered, increase DTO before lowering refclk dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); - dcn20_update_clocks_update_dentist(clk_mgr); + dcn20_update_clocks_update_dentist(clk_mgr, context); } else { // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) - dcn20_update_clocks_update_dentist(clk_mgr); + dcn20_update_clocks_update_dentist(clk_mgr, context); // always update dtos unless clock is lowered and not safe to lower dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index 0b9c045b0c8e..d254d0b6fba1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -50,7 +50,8 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); -void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr, + struct dc_state *context); void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 652fa89fae5f..513676a6f52b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -334,11 +334,11 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, if (dpp_clock_lowered) { /* if clock is being lowered, increase DTO before lowering refclk */ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); - dcn20_update_clocks_update_dentist(clk_mgr); + dcn20_update_clocks_update_dentist(clk_mgr, context); } else { /* if clock is being raised, increase refclk before lowering DTO */ if (update_dppclk || update_dispclk) - dcn20_update_clocks_update_dentist(clk_mgr); + dcn20_update_clocks_update_dentist(clk_mgr, context); /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures * that we do not lower dto when it is not safe to lower. 
We do not need to * compare the current and new dppclk before calling this function.*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 15f987a63025..9039fb134db5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -409,13 +409,13 @@ void get_surface_tile_visual_confirm_color( struct tg_color *color) { uint32_t color_value = MAX_TG_COLOR_VALUE; - /* Determine the overscan color based on the top-most (desktop) plane's context */ - struct pipe_ctx *top_pipe_ctx = pipe_ctx; + /* Determine the overscan color based on the bottom-most plane's context */ + struct pipe_ctx *bottom_pipe_ctx = pipe_ctx; - while (top_pipe_ctx->top_pipe != NULL) - top_pipe_ctx = top_pipe_ctx->top_pipe; + while (bottom_pipe_ctx->bottom_pipe != NULL) + bottom_pipe_ctx = bottom_pipe_ctx->bottom_pipe; - switch (top_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) { + switch (bottom_pipe_ctx->plane_state->tiling_info.gfx9.swizzle) { case DC_SW_LINEAR: /* LINEAR Surface - set border color to red */ color->color_r_cr = color_value; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 0f91280883a6..9058e45add92 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -49,6 +49,7 @@ #include "dmub/dmub_srv.h" #include "inc/hw/panel_cntl.h" #include "inc/link_enc_cfg.h" +#include "inc/link_dpcd.h" #define DC_LOGGER_INIT(logger) @@ -59,20 +60,6 @@ #define RETIMER_REDRIVER_INFO(...) \ DC_LOG_RETIMER_REDRIVER( \ __VA_ARGS__) -/******************************************************************************* - * Private structures - ******************************************************************************/ - -enum { - PEAK_FACTOR_X1000 = 1006, - /* - * Some receivers fail to train on first try and are good - * on subsequent tries. 2 retries should be plenty. If we - * don't have a successful training then we don't expect to - * ever get one. - */ - LINK_TRAINING_MAX_VERIFY_RETRY = 2 -}; /******************************************************************************* * Private functions @@ -718,11 +705,9 @@ static void read_current_link_settings_on_detect(struct dc_link *link) static bool detect_dp(struct dc_link *link, struct display_sink_capability *sink_caps, - bool *converter_disable_audio, - struct audio_support *audio_support, enum dc_detect_reason reason) { - bool boot = false; + struct audio_support *audio_support = &link->dc->res_pool->audio_support; sink_caps->signal = link_detect_sink(link, reason); sink_caps->transaction_type = @@ -745,60 +730,12 @@ static bool detect_dp(struct dc_link *link, * of this function). */ query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link); #endif - /* - * This call will initiate MST topology discovery. Which - * will detect MST ports and add new DRM connector DRM - * framework. Then read EDID via remote i2c over aux. In - * the end, will notify DRM detect result and save EDID - * into DRM framework. - * - * .detect is called by .fill_modes. - * .fill_modes is called by user mode ioctl - * DRM_IOCTL_MODE_GETCONNECTOR. - * - * .get_modes is called by .fill_modes. - * - * call .get_modes, AMDGPU DM implementation will create - * new dc_sink and add to dc_link. For long HPD plug - * in/out, MST has its own handle. 
- * - * Therefore, just after dc_create, link->sink is not - * created for MST until user mode app calls - * DRM_IOCTL_MODE_GETCONNECTOR. - * - * Need check ->sink usages in case ->sink = NULL - * TODO: s3 resume check - */ - if (reason == DETECT_REASON_BOOT) - boot = true; - - dm_helpers_dp_update_branch_info(link->ctx, link); - - if (!dm_helpers_dp_mst_start_top_mgr(link->ctx, - link, boot)) { - /* MST not supported */ - link->type = dc_connection_single; - sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; - } } if (link->type != dc_connection_mst_branch && - is_dp_branch_device(link)) { + is_dp_branch_device(link)) /* DP SST branch */ link->type = dc_connection_sst_branch; - if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { - /* - * SST branch unplug processing for short irq - */ - link_disconnect_sink(link); - return true; - } - - if (is_dp_active_dongle(link) && - (link->dpcd_caps.dongle_type != - DISPLAY_DONGLE_DP_HDMI_CONVERTER)) - *converter_disable_audio = true; - } } else { /* DP passive dongles */ sink_caps->signal = dp_passive_dongle_detection(link->ddc, @@ -893,7 +830,6 @@ static bool dc_link_detect_helper(struct dc_link *link, struct dc_sink *sink = NULL; struct dc_sink *prev_sink = NULL; struct dpcd_caps prev_dpcd_caps; - bool same_dpcd = true; enum dc_connection_type new_connection_type = dc_connection_none; enum dc_connection_type pre_connection_type = dc_connection_none; bool perform_dp_seamless_boot = false; @@ -904,9 +840,10 @@ static bool dc_link_detect_helper(struct dc_link *link, if (dc_is_virtual_signal(link->connector_signal)) return false; - if ((link->connector_signal == SIGNAL_TYPE_LVDS || - link->connector_signal == SIGNAL_TYPE_EDP) && - link->local_sink) { + if (((link->connector_signal == SIGNAL_TYPE_LVDS || + link->connector_signal == SIGNAL_TYPE_EDP) && + (!link->dc->config.allow_edp_hotplug_detection)) && + link->local_sink) { // need to re-write OUI and brightness in resume case if (link->connector_signal == SIGNAL_TYPE_EDP) { dpcd_set_source_specific_data(link); @@ -983,20 +920,59 @@ static bool dc_link_detect_helper(struct dc_link *link, return false; } - if (!detect_dp(link, &sink_caps, - &converter_disable_audio, - aud_support, reason)) { + if (!detect_dp(link, &sink_caps, reason)) { if (prev_sink) dc_sink_release(prev_sink); return false; } - // Check if dpcp block is the same - if (prev_sink) { - if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, - sizeof(struct dpcd_caps))) - same_dpcd = false; + if (link->type == dc_connection_mst_branch) { + LINK_INFO("link=%d, mst branch is now Connected\n", + link->link_index); + /* Need to setup mst link_cap struct here + * otherwise dc_link_detect() will leave mst link_cap + * empty which leads to allocate_mst_payload() has "0" + * pbn_per_slot value leading to exception on dc_fixpt_div() + */ + dp_verify_mst_link_cap(link); + + /* + * This call will initiate MST topology discovery. Which + * will detect MST ports and add new DRM connector DRM + * framework. Then read EDID via remote i2c over aux. In + * the end, will notify DRM detect result and save EDID + * into DRM framework. + * + * .detect is called by .fill_modes. + * .fill_modes is called by user mode ioctl + * DRM_IOCTL_MODE_GETCONNECTOR. + * + * .get_modes is called by .fill_modes. + * + * call .get_modes, AMDGPU DM implementation will create + * new dc_sink and add to dc_link. For long HPD plug + * in/out, MST has its own handle. 
+ * + * Therefore, just after dc_create, link->sink is not + * created for MST until user mode app calls + * DRM_IOCTL_MODE_GETCONNECTOR. + * + * Need check ->sink usages in case ->sink = NULL + * TODO: s3 resume check + */ + + dm_helpers_dp_update_branch_info(link->ctx, link); + if (dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, reason == DETECT_REASON_BOOT)) { + if (prev_sink) + dc_sink_release(prev_sink); + return false; + } else { + link->type = dc_connection_sst_branch; + sink_caps.signal = SIGNAL_TYPE_DISPLAY_PORT; + } } + /* Active SST downstream branch device unplug*/ if (link->type == dc_connection_sst_branch && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { @@ -1006,31 +982,23 @@ static bool dc_link_detect_helper(struct dc_link *link, return true; } + /* disable audio for non DP to HDMI active sst converter */ + if (link->type == dc_connection_sst_branch && + is_dp_active_dongle(link) && + (link->dpcd_caps.dongle_type != + DISPLAY_DONGLE_DP_HDMI_CONVERTER)) + converter_disable_audio = true; + // link switch from MST to non-MST stop topology manager if (pre_connection_type == dc_connection_mst_branch && - link->type != dc_connection_mst_branch) { + link->type != dc_connection_mst_branch) dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - } - if (link->type == dc_connection_mst_branch) { - LINK_INFO("link=%d, mst branch is now Connected\n", - link->link_index); - /* Need to setup mst link_cap struct here - * otherwise dc_link_detect() will leave mst link_cap - * empty which leads to allocate_mst_payload() has "0" - * pbn_per_slot value leading to exception on dc_fixpt_div() - */ - dp_verify_mst_link_cap(link); - - if (prev_sink) - dc_sink_release(prev_sink); - return false; - } // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. if (reason == DETECT_REASON_BOOT && - !dc_ctx->dc->config.power_down_display_on_boot && - link->link_status.link_active) + !dc_ctx->dc->config.power_down_display_on_boot && + link->link_status.link_active) perform_dp_seamless_boot = true; if (perform_dp_seamless_boot) { @@ -1213,11 +1181,11 @@ static bool dc_link_detect_helper(struct dc_link *link, link->dongle_max_pix_clk = 0; } - LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n", + LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", link->link_index, sink, (sink_caps.signal == SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"), - prev_sink, same_dpcd, same_edid); + prev_sink, same_edid); if (prev_sink) dc_sink_release(prev_sink); @@ -1501,7 +1469,8 @@ static bool dc_link_construct(struct dc_link *link, link->connector_signal = SIGNAL_TYPE_EDP; if (link->hpd_gpio) { - link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + if (!link->dc->config.allow_edp_hotplug_detection) + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; link->irq_source_hpd_rx = dal_irq_get_rx_source(link->hpd_gpio); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 919c94de2a20..5ecbe525b676 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -25,6 +25,8 @@ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; link->ctx->logger #define DC_TRACE_LEVEL_MESSAGE(...) 
/* do nothing */ +#include "link_dpcd.h" + /* maximum pre emphasis level allowed for each voltage swing level*/ static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3, @@ -1618,11 +1620,10 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train { enum dc_status status = DC_OK; - if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) - status = configure_lttpr_mode_transparent(link); - - else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) status = configure_lttpr_mode_non_transparent(link, lt_settings); + else + status = configure_lttpr_mode_transparent(link); return status; } @@ -1806,7 +1807,7 @@ bool perform_link_training_with_retries( enum dp_panel_mode panel_mode; struct link_encoder *link_enc; enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; - struct dc_link_settings currnet_setting = *link_setting; + struct dc_link_settings current_setting = *link_setting; /* Dynamically assigned link encoders associated with stream rather than * link. @@ -1832,7 +1833,7 @@ bool perform_link_training_with_retries( link, signal, pipe_ctx->clock_source->id, - &currnet_setting); + ¤t_setting); if (stream->sink_patches.dppowerup_delay > 0) { int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; @@ -1847,12 +1848,12 @@ bool perform_link_training_with_retries( panel_mode != DP_PANEL_MODE_DEFAULT); if (link->aux_access_disabled) { - dc_link_dp_perform_link_training_skip_aux(link, &currnet_setting); + dc_link_dp_perform_link_training_skip_aux(link, ¤t_setting); return true; } else { status = dc_link_dp_perform_link_training( link, - &currnet_setting, + ¤t_setting, skip_video_pattern); if (status == LINK_TRAINING_SUCCESS) return true; @@ -1872,12 +1873,12 @@ bool perform_link_training_with_retries( if (status == LINK_TRAINING_ABORT) break; else if (do_fallback) { - decide_fallback_link_setting(*link_setting, &currnet_setting, status); + decide_fallback_link_setting(*link_setting, ¤t_setting, status); /* Fail link training if reduced link bandwidth no longer meets * stream requirements. */ if (dc_bandwidth_in_kbps_from_timing(&stream->timing) < - dc_link_bandwidth_kbps(link, &currnet_setting)) + dc_link_bandwidth_kbps(link, ¤t_setting)) break; } @@ -3619,79 +3620,16 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) return true; } -static bool retrieve_link_cap(struct dc_link *link) +bool dp_retrieve_lttpr_cap(struct dc_link *link) { - /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, - * which means size 16 will be good for both of those DPCD register block reads - */ - uint8_t dpcd_data[16]; uint8_t lttpr_dpcd_data[6]; - - /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. 
- */ - uint8_t dpcd_dprx_data = '\0'; - uint8_t dpcd_power_state = '\0'; - - struct dp_device_vendor_id sink_id; - union down_stream_port_count down_strm_port_count; - union edp_configuration_cap edp_config_cap; - union dp_downstream_port_present ds_port = { 0 }; - enum dc_status status = DC_ERROR_UNEXPECTED; - uint32_t read_dpcd_retry_cnt = 3; - int i; - struct dp_sink_hw_fw_revision dp_hw_fw_revision; - bool is_lttpr_present = false; - const uint32_t post_oui_delay = 30; // 30ms bool vbios_lttpr_enable = false; bool vbios_lttpr_interop = false; struct dc_bios *bios = link->dc->ctx->dc_bios; + enum dc_status status = DC_ERROR_UNEXPECTED; + bool is_lttpr_present = false; - memset(dpcd_data, '\0', sizeof(dpcd_data)); memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); - memset(&down_strm_port_count, - '\0', sizeof(union down_stream_port_count)); - memset(&edp_config_cap, '\0', - sizeof(union edp_configuration_cap)); - - /* if extended timeout is supported in hardware, - * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer - * CTS 4.2.1.1 regression introduced by CTS specs requirement update. - */ - dc_link_aux_try_to_configure_timeout(link->ddc, - LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); - - status = core_link_read_dpcd(link, DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - - /* Delay 1 ms if AUX CH is in power down state. Based on spec - * section 2.3.1.2, if AUX CH may be powered down due to - * write to DPCD 600h = 2. Sink AUX CH is monitoring differential - * signal and may need up to 1 ms before being able to reply. - */ - if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) - udelay(1000); - - dpcd_set_source_specific_data(link); - /* Sink may need to configure internals based on vendor, so allow some - * time before proceeding with possibly vendor specific transactions - */ - msleep(post_oui_delay); - - for (i = 0; i < read_dpcd_retry_cnt; i++) { - status = core_link_read_dpcd( - link, - DP_DPCD_REV, - dpcd_data, - sizeof(dpcd_data)); - if (status == DC_OK) - break; - } - - if (status != DC_OK) { - dm_error("%s: Read dpcd data failed.\n", __func__); - return false; - } - /* Query BIOS to determine if LTTPR functionality is forced on by system */ if (bios->funcs->get_lttpr_caps) { enum bp_result bp_query_result; @@ -3763,21 +3701,91 @@ static bool retrieve_link_cap(struct dc_link *link) DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ - is_lttpr_present = (link->dpcd_caps.lttpr_caps.phy_repeater_cnt > 0 && - link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff && + is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); - if (is_lttpr_present) + if (is_lttpr_present) { CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); - else + configure_lttpr_mode_transparent(link); + } else link->lttpr_mode = LTTPR_MODE_NON_LTTPR; } + return is_lttpr_present; +} + +static bool retrieve_link_cap(struct dc_link *link) +{ + /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, + * which means size 16 will be good for both of those DPCD register block reads + */ + uint8_t dpcd_data[16]; + /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. 
+ */ + uint8_t dpcd_dprx_data = '\0'; + uint8_t dpcd_power_state = '\0'; + + struct dp_device_vendor_id sink_id; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + union dp_downstream_port_present ds_port = { 0 }; + enum dc_status status = DC_ERROR_UNEXPECTED; + uint32_t read_dpcd_retry_cnt = 3; + int i; + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + const uint32_t post_oui_delay = 30; // 30ms + bool is_lttpr_present = false; + + memset(dpcd_data, '\0', sizeof(dpcd_data)); + memset(&down_strm_port_count, + '\0', sizeof(union down_stream_port_count)); + memset(&edp_config_cap, '\0', + sizeof(union edp_configuration_cap)); + + /* if extended timeout is supported in hardware, + * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer + * CTS 4.2.1.1 regression introduced by CTS specs requirement update. + */ + dc_link_aux_try_to_configure_timeout(link->ddc, + LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); + + is_lttpr_present = dp_retrieve_lttpr_cap(link); + + status = core_link_read_dpcd(link, DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + /* Delay 1 ms if AUX CH is in power down state. Based on spec + * section 2.3.1.2, if AUX CH may be powered down due to + * write to DPCD 600h = 2. Sink AUX CH is monitoring differential + * signal and may need up to 1 ms before being able to reply. + */ + if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) + udelay(1000); + + dpcd_set_source_specific_data(link); + /* Sink may need to configure internals based on vendor, so allow some + * time before proceeding with possibly vendor specific transactions + */ + msleep(post_oui_delay); + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + if (status != DC_OK) { + dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); + return false; + } if (!is_lttpr_present) dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); - { union training_aux_rd_interval aux_rd_interval; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c new file mode 100644 index 000000000000..27ec1e6e9c43 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c @@ -0,0 +1,218 @@ +#include <inc/core_status.h> +#include <dc_link.h> +#include <inc/link_hwss.h> +#include <inc/link_dpcd.h> +#include "drm/drm_dp_helper.h" +#include <dc_dp_types.h> +#include "dm_helpers.h" + +#define END_ADDRESS(start, size) (start + size - 1) +#define ADDRESS_RANGE_SIZE(start, end) (end - start + 1) +struct dpcd_address_range { + uint32_t start; + uint32_t end; +}; + +static enum dc_status internal_link_read_dpcd( + struct dc_link *link, + uint32_t address, + uint8_t *data, + uint32_t size) +{ + if (!link->aux_access_disabled && + !dm_helpers_dp_read_dpcd(link->ctx, + link, address, data, size)) { + return DC_ERROR_UNEXPECTED; + } + + return DC_OK; +} + +static enum dc_status internal_link_write_dpcd( + struct dc_link *link, + uint32_t address, + const uint8_t *data, + uint32_t size) +{ + if (!link->aux_access_disabled && + !dm_helpers_dp_write_dpcd(link->ctx, + link, address, data, size)) { + return DC_ERROR_UNEXPECTED; + } + + return DC_OK; +} + +/* + * Partition the entire DPCD address space + * XXX: This partitioning must cover the entire DPCD address space, + * and must contain no gaps or overlapping address ranges. 
+ */ +static const struct dpcd_address_range mandatory_dpcd_partitions[] = { + { 0, DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1) - 1}, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 }, + /* + * The FEC registers are contiguous + */ + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8), DP_LTTPR_MAX_ADD }, + /* all remaining DPCD addresses */ + { DP_LTTPR_MAX_ADD + 1, DP_DPCD_MAX_ADD } }; + +static inline bool do_addresses_intersect_with_range( + const struct dpcd_address_range *range, + const uint32_t start_address, + const uint32_t end_address) +{ + return start_address <= range->end && end_address >= range->start; +} + +static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint32_t size) +{ + const uint32_t end_address = END_ADDRESS(address, size); + uint32_t partition_iterator = 0; + + /* + * find current partition + * this loop spins forever if partition map above is not surjective + */ + while (!do_addresses_intersect_with_range(&mandatory_dpcd_partitions[partition_iterator], + address, end_address)) + partition_iterator++; + if (end_address < mandatory_dpcd_partitions[partition_iterator].end) + return size; + return ADDRESS_RANGE_SIZE(address, mandatory_dpcd_partitions[partition_iterator].end); +} + +/* + * Ranges of DPCD addresses that must be read in a single transaction + * XXX: Do not allow any two address ranges in this array to overlap + */ +static const struct dpcd_address_range mandatory_dpcd_blocks[] = { + { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }}; + +/* + * extend addresses to read all mandatory blocks together + */ +static void dpcd_extend_address_range( + const uint32_t in_address, + uint8_t * const in_data, + const uint32_t in_size, + uint32_t *out_address, + uint8_t **out_data, + uint32_t *out_size) +{ + const uint32_t end_address = END_ADDRESS(in_address, in_size); + const struct dpcd_address_range *addr_range; + struct dpcd_address_range new_addr_range; + uint32_t i; + + 
new_addr_range.start = in_address; + new_addr_range.end = end_address; + for (i = 0; i < ARRAY_SIZE(mandatory_dpcd_blocks); i++) { + addr_range = &mandatory_dpcd_blocks[i]; + if (addr_range->start <= in_address && addr_range->end >= in_address) + new_addr_range.start = addr_range->start; + + if (addr_range->start <= end_address && addr_range->end >= end_address) + new_addr_range.end = addr_range->end; + } + *out_address = in_address; + *out_size = in_size; + *out_data = in_data; + if (new_addr_range.start != in_address || new_addr_range.end != end_address) { + *out_address = new_addr_range.start; + *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end); + *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL); + } +} + +/* + * Reduce the AUX reply down to the values the caller requested + */ +static void dpcd_reduce_address_range( + const uint32_t extended_address, + uint8_t * const extended_data, + const uint32_t extended_size, + const uint32_t reduced_address, + uint8_t * const reduced_data, + const uint32_t reduced_size) +{ + const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size); + const uint32_t extended_end_address = END_ADDRESS(reduced_address, extended_size); + const uint32_t offset = reduced_address - extended_address; + + if (extended_end_address == reduced_end_address && extended_address == reduced_address) + return; /* extended and reduced address ranges point to the same data */ + + memcpy(&extended_data[offset], reduced_data, reduced_size); + kfree(extended_data); +} + +enum dc_status core_link_read_dpcd( + struct dc_link *link, + uint32_t address, + uint8_t *data, + uint32_t size) +{ + uint32_t extended_address; + uint32_t partitioned_address; + uint8_t *extended_data; + uint32_t extended_size; + /* size of the remaining partitioned address space */ + uint32_t size_left_to_read; + enum dc_status status; + /* size of the next partition to be read from */ + uint32_t partition_size; + uint32_t data_index = 0; + + dpcd_extend_address_range(address, data, size, &extended_address, &extended_data, &extended_size); + partitioned_address = extended_address; + size_left_to_read = extended_size; + while (size_left_to_read) { + partition_size = dpcd_get_next_partition_size(partitioned_address, size_left_to_read); + status = internal_link_read_dpcd(link, partitioned_address, &extended_data[data_index], partition_size); + if (status != DC_OK) + break; + partitioned_address += partition_size; + data_index += partition_size; + size_left_to_read -= partition_size; + } + dpcd_reduce_address_range(extended_address, extended_data, extended_size, address, data, size); + return status; +} + +enum dc_status core_link_write_dpcd( + struct dc_link *link, + uint32_t address, + const uint8_t *data, + uint32_t size) +{ + uint32_t partition_size; + uint32_t data_index = 0; + enum dc_status status; + + while (size) { + partition_size = dpcd_get_next_partition_size(address, size); + status = internal_link_write_dpcd(link, address, &data[data_index], partition_size); + if (status != DC_OK) + break; + address += partition_size; + data_index += partition_size; + size -= partition_size; + } + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index f7dfc8fefdfa..9c51cd09dcf1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -16,6 +16,7 @@ #include "resource.h" #include "link_enc_cfg.h" #include 
"clk_mgr.h" +#include "inc/link_dpcd.h" static uint8_t convert_to_count(uint8_t lttpr_repeater_count) { @@ -47,36 +48,6 @@ static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset); } -enum dc_status core_link_read_dpcd( - struct dc_link *link, - uint32_t address, - uint8_t *data, - uint32_t size) -{ - if (!link->aux_access_disabled && - !dm_helpers_dp_read_dpcd(link->ctx, - link, address, data, size)) { - return DC_ERROR_UNEXPECTED; - } - - return DC_OK; -} - -enum dc_status core_link_write_dpcd( - struct dc_link *link, - uint32_t address, - const uint8_t *data, - uint32_t size) -{ - if (!link->aux_access_disabled && - !dm_helpers_dp_write_dpcd(link->ctx, - link, address, data, size)) { - return DC_ERROR_UNEXPECTED; - } - - return DC_OK; -} - void dp_receiver_power_ctrl(struct dc_link *link, bool on) { uint8_t state; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 5fe4c5f80b54..3feb19f7e117 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -445,7 +445,7 @@ bool resource_are_vblanks_synchronizable( { uint32_t base60_refresh_rates[] = {10, 20, 5}; uint8_t i; - uint8_t rr_count = sizeof(base60_refresh_rates)/sizeof(base60_refresh_rates[0]); + uint8_t rr_count = ARRAY_SIZE(base60_refresh_rates); uint64_t frame_time_diff; if (stream1->ctx->dc->config.vblank_alignment_dto_params && diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a70697898025..9d924e8496f9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -45,7 +45,7 @@ /* forward declaration */ struct aux_payload; -#define DC_VER "3.2.139" +#define DC_VER "3.2.140" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -303,6 +303,7 @@ struct dc_config { bool multi_mon_pp_mclk_switch; bool disable_dmcu; bool enable_4to1MPC; + bool allow_edp_hotplug_detection; #if defined(CONFIG_DRM_AMD_DC_DCN) bool clamp_min_dcfclk; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 48ca23e1e599..36b6fbcc0441 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -86,6 +86,7 @@ void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv, error: DC_ERROR("Error queuing DMUB command: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); } void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) @@ -95,8 +96,10 @@ void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv) enum dmub_status status; status = dmub_srv_cmd_execute(dmub); - if (status != DMUB_STATUS_OK) + if (status != DMUB_STATUS_OK) { DC_ERROR("Error starting DMUB execution: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } } void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) @@ -106,8 +109,10 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) enum dmub_status status; status = dmub_srv_wait_for_idle(dmub, 100000); - if (status != DMUB_STATUS_OK) + if (status != DMUB_STATUS_OK) { DC_ERROR("Error waiting for DMUB idle: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dc_dmub_srv); + } } void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, @@ -214,3 +219,94 @@ void dc_dmub_trace_event_control(struct dc *dc, bool enable) { dm_helpers_dmub_outbox_interrupt_control(dc->ctx, 
enable); } + +bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *diag_data) +{ + if (!dc_dmub_srv || !dc_dmub_srv->dmub || !diag_data) + return false; + return dmub_srv_get_diagnostic_data(dc_dmub_srv->dmub, diag_data); +} + +void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dmub_diagnostic_data diag_data = {0}; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) { + DC_LOG_ERROR("%s: invalid parameters.", __func__); + return; + } + + if (!dc_dmub_srv_get_diagnostic_data(dc_dmub_srv, &diag_data)) { + DC_LOG_ERROR("%s: dc_dmub_srv_get_diagnostic_data failed.", __func__); + return; + } + + DC_LOG_DEBUG( + "DMCUB STATE\n" + " dmcub_version : %08x\n" + " scratch [0] : %08x\n" + " scratch [1] : %08x\n" + " scratch [2] : %08x\n" + " scratch [3] : %08x\n" + " scratch [4] : %08x\n" + " scratch [5] : %08x\n" + " scratch [6] : %08x\n" + " scratch [7] : %08x\n" + " scratch [8] : %08x\n" + " scratch [9] : %08x\n" + " scratch [10] : %08x\n" + " scratch [11] : %08x\n" + " scratch [12] : %08x\n" + " scratch [13] : %08x\n" + " scratch [14] : %08x\n" + " scratch [15] : %08x\n" + " pc : %08x\n" + " unk_fault_addr : %08x\n" + " inst_fault_addr : %08x\n" + " data_fault_addr : %08x\n" + " inbox1_rptr : %08x\n" + " inbox1_wptr : %08x\n" + " inbox1_size : %08x\n" + " inbox0_rptr : %08x\n" + " inbox0_wptr : %08x\n" + " inbox0_size : %08x\n" + " is_enabled : %d\n" + " is_soft_reset : %d\n" + " is_secure_reset : %d\n" + " is_traceport_en : %d\n" + " is_cw0_en : %d\n" + " is_cw6_en : %d\n", + diag_data.dmcub_version, + diag_data.scratch[0], + diag_data.scratch[1], + diag_data.scratch[2], + diag_data.scratch[3], + diag_data.scratch[4], + diag_data.scratch[5], + diag_data.scratch[6], + diag_data.scratch[7], + diag_data.scratch[8], + diag_data.scratch[9], + diag_data.scratch[10], + diag_data.scratch[11], + diag_data.scratch[12], + diag_data.scratch[13], + diag_data.scratch[14], + diag_data.scratch[15], + diag_data.pc, + diag_data.undefined_address_fault_addr, + diag_data.inst_fetch_fault_addr, + diag_data.data_write_fault_addr, + diag_data.inbox1_rptr, + diag_data.inbox1_wptr, + diag_data.inbox1_size, + diag_data.inbox0_rptr, + diag_data.inbox0_wptr, + diag_data.inbox0_size, + diag_data.is_dmcub_enabled, + diag_data.is_dmcub_soft_reset, + diag_data.is_dmcub_secure_reset, + diag_data.is_traceport_en, + diag_data.is_cw0_enabled, + diag_data.is_cw6_enabled); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index f5489c7aa770..0d5680198937 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -71,4 +71,8 @@ void dc_dmub_trace_event_control(struct dc *dc, bool enable); void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data); +bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); + +void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); + #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 83d97dfe328f..28631714f697 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -615,7 +615,8 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc, } #define AUX_MAX_RETRIES 7 -#define AUX_MAX_DEFER_RETRIES 7 +#define AUX_MIN_DEFER_RETRIES 7 +#define AUX_MAX_DEFER_TIMEOUT_MS 50 #define 
AUX_MAX_I2C_DEFER_RETRIES 7 #define AUX_MAX_INVALID_REPLY_RETRIES 2 #define AUX_MAX_TIMEOUT_RETRIES 3 @@ -628,6 +629,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, bool payload_reply = true; enum aux_return_code_type operation_result; bool retry_on_defer = false; + struct ddc *ddc_pin = ddc->ddc_pin; + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); + uint32_t defer_time_in_ms = 0; int aux_ack_retries = 0, aux_defer_retries = 0, @@ -660,19 +665,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_TRANSACTION_REPLY_AUX_DEFER: + /* polling_timeout_period is in us */ + defer_time_in_ms += aux110->polling_timeout_period / 1000; + ++aux_defer_retries; + /* fall through */ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: retry_on_defer = true; fallthrough; case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { + if (aux_defer_retries >= AUX_MIN_DEFER_RETRIES + && defer_time_in_ms >= AUX_MAX_DEFER_TIMEOUT_MS) { goto fail; } else { if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) || (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { - if (payload->defer_delay > 1) + if (payload->defer_delay > 1) { msleep(payload->defer_delay); - else if (payload->defer_delay <= 1) + defer_time_in_ms += payload->defer_delay; + } else if (payload->defer_delay <= 1) { udelay(payload->defer_delay * 1000); + defer_time_in_ms += payload->defer_delay; + } } } break; @@ -701,7 +714,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, // Check whether a DEFER had occurred before the timeout. // If so, treat timeout as a DEFER. if (retry_on_defer) { - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + if (++aux_defer_retries >= AUX_MIN_DEFER_RETRIES) goto fail; else if (payload->defer_delay > 0) msleep(payload->defer_delay); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 53dd305fa6b0..d76e19535c66 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -65,7 +65,6 @@ #include "atomfirmware.h" -#include "dce110_hw_sequencer.h" #include "dcn10/dcn10_hw_sequencer.h" #define GAMMA_HW_POINTS_NUM 256 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 3b175af97388..5d54900f7b61 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -54,6 +54,8 @@ #include "dce/dmub_hw_lock_mgr.h" #include "dc_trace.h" #include "dce/dmub_outbox.h" +#include "inc/dc_link_dp.h" +#include "inc/link_dpcd.h" #define DC_LOGGER_INIT(logger) @@ -1403,6 +1405,9 @@ void dcn10_init_hw(struct dc *dc) if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) continue; + /* DP 2.0 requires that LTTPR Caps be read first */ + dp_retrieve_lttpr_cap(dc->links[i]); + /* * If any of the displays are lit up turn them off. 
* The reason is that some MST hubs cannot be turned off diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 76b334644f9e..0d86df97878c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -52,6 +52,7 @@ SRI(AFMT_60958_1, DIG, id), \ SRI(AFMT_60958_2, DIG, id), \ SRI(DIG_FE_CNTL, DIG, id), \ + SRI(DIG_FIFO_STATUS, DIG, id), \ SRI(HDMI_CONTROL, DIG, id), \ SRI(HDMI_DB_CONTROL, DIG, id), \ SRI(HDMI_GC, DIG, id), \ @@ -124,6 +125,7 @@ struct dcn10_stream_enc_registers { uint32_t AFMT_60958_2; uint32_t DIG_FE_CNTL; uint32_t DIG_FE_CNTL2; + uint32_t DIG_FIFO_STATUS; uint32_t DP_MSE_RATE_CNTL; uint32_t DP_MSE_RATE_UPDATE; uint32_t DP_PIXEL_FORMAT; @@ -266,6 +268,17 @@ struct dcn10_stream_enc_registers { SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\ @@ -488,6 +501,17 @@ struct dcn10_stream_enc_registers { type DP_VID_N_MUL;\ type DP_VID_M_DOUBLE_VALUE_EN;\ type DIG_SOURCE_SELECT;\ + type DIG_FIFO_LEVEL_ERROR;\ + type DIG_FIFO_USE_OVERWRITE_LEVEL;\ + type DIG_FIFO_OVERWRITE_LEVEL;\ + type DIG_FIFO_ERROR_ACK;\ + type DIG_FIFO_CAL_AVERAGE_LEVEL;\ + type DIG_FIFO_MAXIMUM_LEVEL;\ + type DIG_FIFO_MINIMUM_LEVEL;\ + type DIG_FIFO_READ_CLOCK_SRC;\ + type DIG_FIFO_CALIBRATED;\ + type DIG_FIFO_FORCE_RECAL_AVERAGE;\ + type DIG_FIFO_FORCE_RECOMP_MINMAX;\ type DIG_CLOCK_PATTERN #define SE_REG_FIELD_LIST_DCN2_0(type) \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 4075ae111530..e6307397e0d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -552,6 +552,17 @@ void enc2_stream_encoder_dp_set_stream_attribute( DP_SST_SDP_SPLITTING, enable_sdp_splitting); } +uint32_t enc2_get_fifo_cal_average_level( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t fifo_level; + + REG_GET(DIG_FIFO_STATUS, + DIG_FIFO_CAL_AVERAGE_LEVEL, &fifo_level); + return fifo_level; +} + static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .dp_set_odm_combine = enc2_dp_set_odm_combine, @@ -598,6 +609,7 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .dp_set_dsc_pps_info_packet = enc2_dp_set_dsc_pps_info_packet, .set_dynamic_metadata = 
enc2_set_dynamic_metadata, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, + .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level, }; void dcn20_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h index 9a881e639709..f3d1a0237bda 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h @@ -112,4 +112,7 @@ void enc2_set_dynamic_metadata(struct stream_encoder *enc, uint32_t hubp_requestor_id, enum dynamic_metadata_mode dmdata_mode); +uint32_t enc2_get_fifo_cal_average_level( + struct stream_encoder *enc); + #endif /* __DC_STREAM_ENCODER_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index 72bee637c1e4..8487516819ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -823,6 +823,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = { .dp_set_dsc_pps_info_packet = enc3_dp_set_dsc_pps_info_packet, .set_dynamic_metadata = enc2_set_dynamic_metadata, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, + + .get_fifo_cal_average_level = enc2_get_fifo_cal_average_level, }; void dcn30_dio_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h index 9566b9037458..e2c264ecb20f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h @@ -106,6 +106,7 @@ SRI(DP_SEC_METADATA_TRANSMISSION, DP, id), \ SRI(HDMI_METADATA_PACKET_CONTROL, DIG, id), \ SRI(DIG_FE_CNTL, DIG, id), \ + SRI(DIG_FIFO_STATUS, DIG, id), \ SRI(DIG_CLOCK_PATTERN, DIG, id) @@ -167,6 +168,17 @@ SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, mask_sh),\ SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_LEVEL_ERROR, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_USE_OVERWRITE_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_OVERWRITE_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_ERROR_ACK, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CAL_AVERAGE_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MAXIMUM_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_MINIMUM_LEVEL, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_READ_CLOCK_SRC, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_CALIBRATED, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECAL_AVERAGE, mask_sh),\ + SE_SF(DIG0_DIG_FIFO_STATUS, DIG_FIFO_FORCE_RECOMP_MINMAX, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\ SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index ef5d0b778a72..5642172e0df8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -48,6 +48,8 @@ #include "dc_dmub_srv.h" #include "link_hwss.h" #include "dpcd_defs.h" +#include "inc/dc_link_dp.h" +#include "inc/link_dpcd.h" @@ -529,6 +531,8 @@ void dcn30_init_hw(struct dc *dc) 
for (i = 0; i < dc->link_count; i++) { if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) continue; + /* DP 2.0 states that LTTPR regs must be read first */ + dp_retrieve_lttpr_cap(dc->links[i]); /* if any of the displays are lit up turn them off */ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c index dc33ec8b7bdb..b48b732aa647 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.c @@ -38,3 +38,8 @@ void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool po { /*DCN303 removes PG registers*/ } + +void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable) +{ + /*DCN303 removes PG registers*/ +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h index fc6cab720b6d..8b69a3b76c11 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_hwseq.h @@ -13,5 +13,6 @@ void dcn303_dpp_pg_control(struct dce_hwseq *hws, unsigned int dpp_inst, bool power_on); void dcn303_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on); void dcn303_dsc_pg_control(struct dce_hwseq *hws, unsigned int dsc_inst, bool power_on); +void dcn303_enable_power_gating_plane(struct dce_hwseq *hws, bool enable); #endif /* __DC_HWSS_DCN303_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c index 86d4b303d02f..aa5dbbade2bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c @@ -16,4 +16,5 @@ void dcn303_hw_sequencer_construct(struct dc *dc) dc->hwseq->funcs.dpp_pg_control = dcn303_dpp_pg_control; dc->hwseq->funcs.hubp_pg_control = dcn303_hubp_pg_control; dc->hwseq->funcs.dsc_pg_control = dcn303_dsc_pg_control; + dc->hwseq->funcs.enable_power_gating_plane = dcn303_enable_power_gating_plane; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index c0e544d7556f..cf1779588f96 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -33,7 +33,6 @@ #include "clk_mgr.h" #include "reg_helper.h" #include "abm.h" -#include "clk_mgr.h" #include "hubp.h" #include "dchubbub.h" #include "timing_generator.h" @@ -47,6 +46,7 @@ #include "dpcd_defs.h" #include "dce/dmub_outbox.h" #include "dc_link_dp.h" +#include "inc/link_dpcd.h" #define DC_LOGGER_INIT(logger) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 0d6cb6caad81..c67bc9544f5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -934,7 +934,6 @@ static const struct dc_debug_options debug_defaults_drv = { .dmub_command_table = true, .pstate_enabled = true, .use_max_lb = true, - .pstate_enabled = true, .enable_mem_low_power = { .bits = { .vga = false, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index d655655baaba..06fac59a3d40 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ 
-3536,7 +3536,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes( *BytePerPixelDETC = 0; *BytePerPixelY = 4; *BytePerPixelC = 0; - } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) { + } else if (SourcePixelFormat == dm_444_16) { *BytePerPixelDETY = 2; *BytePerPixelDETC = 0; *BytePerPixelY = 2; @@ -5674,7 +5674,7 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l for (k = 0; k < v->NumberOfActivePlanes; k++) { if (v->ViewportWidth[k] > v->SurfaceWidthY[k] || v->ViewportHeight[k] > v->SurfaceHeightY[k]) { ViewportExceedsSurface = true; - if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 + if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) { if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 345d2d409a6e..0ea9b18662e3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -159,7 +159,6 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_l_in_us, mode_lib->vba.TimeP dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimePerChromaMetaChunkVBlank); dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip); dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip); - dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup); dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix); dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix); @@ -419,7 +418,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) visited[j] = true; mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes; - mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = (enum scan_direction_class) (src->source_scan); diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c index 51855a2624cf..4233955e3c47 100644 --- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -33,6 +33,7 @@ #include "core_types.h" #include "dc_link_ddc.h" #include "link_hwss.h" +#include "inc/link_dpcd.h" #define DC_LOGGER \ link->ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 883c3af51022..e2b58ec9912d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -42,7 +42,15 @@ enum { /* to avoid infinite loop where-in the receiver * switches between different VS */ - LINK_TRAINING_MAX_CR_RETRY = 100 + LINK_TRAINING_MAX_CR_RETRY = 100, + /* + * Some receivers fail to train on first try and are good + * on subsequent tries. 2 retries should be plenty. If we + * don't have a successful training then we don't expect to + * ever get one. 
+ */ + LINK_TRAINING_MAX_VERIFY_RETRY = 2, + PEAK_FACTOR_X1000 = 1006, }; bool dp_verify_link_cap( @@ -182,4 +190,5 @@ enum dc_status dpcd_configure_lttpr_mode( struct link_training_settings *lt_settings); enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings); +bool dp_retrieve_lttpr_cap(struct dc_link *link); #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 47c7e4c3a51b..564ea6a727b0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -237,6 +237,9 @@ struct stream_encoder_funcs { void (*dp_set_odm_combine)( struct stream_encoder *enc, bool odm_combine); + + uint32_t (*get_fifo_cal_average_level)( + struct stream_encoder *enc); }; #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h new file mode 100644 index 000000000000..d4d52ef1b165 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h @@ -0,0 +1,18 @@ +#ifndef __LINK_DPCD_H__ +#define __LINK_DPCD_H__ +#include <inc/core_status.h> +#include <dc_link.h> +#include <inc/link_hwss.h> + +enum dc_status core_link_read_dpcd( + struct dc_link *link, + uint32_t address, + uint8_t *data, + uint32_t size); + +enum dc_status core_link_write_dpcd( + struct dc_link *link, + uint32_t address, + const uint8_t *data, + uint32_t size); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 33590a728fc5..fc1d289bb9fe 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -26,20 +26,6 @@ #ifndef __DC_LINK_HWSS_H__ #define __DC_LINK_HWSS_H__ -#include "inc/core_status.h" - -enum dc_status core_link_read_dpcd( - struct dc_link *link, - uint32_t address, - uint8_t *data, - uint32_t size); - -enum dc_status core_link_write_dpcd( - struct dc_link *link, - uint32_t address, - const uint8_t *data, - uint32_t size); - struct gpio *get_hpd_gpio(struct dc_bios *dcb, struct graphics_object_id link_id, struct gpio_service *gpio_service); |