Diffstat (limited to 'drivers/gpu/drm/amd/display')
154 files changed, 5932 insertions, 1858 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c911b30de658..7f9773f8dab6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -51,6 +51,7 @@
 #include <drm/drm_hdcp.h>
 #endif
 #include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"

 #include "amd_shared.h"
 #include "amdgpu_dm_irq.h"
@@ -623,7 +624,7 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */

 /**
- * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
  * @adev: amdgpu_device pointer
  * @notify: dmub notification structure
  *
@@ -631,7 +632,8 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
  * Copies dmub notification to DM, which is to be read by the AUX command
  * issuing thread, and also signals the event to wake up that thread.
  */
-void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+					struct dmub_notification *notify)
 {
 	if (adev->dm.dmub_notify)
 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
@@ -647,7 +649,8 @@ void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notific
  * Dmub Hpd interrupt processing callback. Gets the display index through the
  * link index and calls the helper to do the processing.
  */
-void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+			      struct dmub_notification *notify)
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct amdgpu_dm_connector *hpd_aconnector = NULL;
@@ -655,7 +658,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
 	struct drm_connector_list_iter iter;
 	struct dc_link *link;
 	uint8_t link_index = 0;
-	struct drm_device *dev = adev->dm.ddev;
+	struct drm_device *dev;

 	if (adev == NULL)
 		return;
@@ -672,6 +675,7 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
 	link_index = notify->link_index;
 	link = adev->dm.dc->links[link_index];
+	dev = adev->dm.ddev;

 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
@@ -704,8 +708,10 @@ void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *not
  * to dmub interrupt handling thread
  * Return: true if successfully registered, false if there is existing registration
  */
-bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
-dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+					  enum dmub_notification_type type,
+					  dmub_notify_interrupt_callback_t callback,
+					  bool dmub_int_thread_offload)
 {
 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
 		adev->dm.dmub_callback[type] = callback;
@@ -789,8 +795,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
 					plink = adev->dm.dc->links[notify.link_index];
 					if (plink) {
 						plink->hpd_status =
-							notify.hpd_status ==
-							DP_HPD_PLUG ? true : false;
+							notify.hpd_status == DP_HPD_PLUG;
 					}
 				}
 				queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
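A note on the hunk above: `cond ? true : false` and plain `cond` assign the same bool, so the ternary is redundant; checkpatch-style cleanups like this one drop it. A minimal standalone illustration (hypothetical variables):

    /* Both forms yield the same bool; the second is preferred kernel style. */
    bool plugged_verbose = (hpd_status == DP_HPD_PLUG) ? true : false; /* redundant */
    bool plugged         = (hpd_status == DP_HPD_PLUG);                /* equivalent */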
@@ -1050,6 +1055,11 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 		return 0;
 	}

+	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
+	status = dmub_srv_hw_reset(dmub_srv);
+	if (status != DMUB_STATUS_OK)
+		DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;

 	fw_inst_const = dmub_fw->data +
@@ -1152,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 	return 0;
 }

+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+	enum dmub_status status;
+	bool init;
+
+	if (!dmub_srv) {
+		/* DMUB isn't supported on the ASIC. */
+		return;
+	}
+
+	status = dmub_srv_is_hw_init(dmub_srv, &init);
+	if (status != DMUB_STATUS_OK)
+		DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+	if (status == DMUB_STATUS_OK && init) {
+		/* Wait for firmware load to finish. */
+		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+		if (status != DMUB_STATUS_OK)
+			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+	} else {
+		/* Perform the full hardware initialization. */
+		dm_dmub_hw_init(adev);
+	}
+}
+
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
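Condensed view of the control flow the new dm_dmub_hw_resume() adds, using only the DMUB service calls shown in the hunk: a full re-init would clobber firmware state that may have survived suspend, so it only runs when the hardware check says DMUB is not initialized.

    /* Sketch of the resume decision above (timeout is in microseconds). */
    bool init = false;

    if (dmub_srv_is_hw_init(dmub_srv, &init) == DMUB_STATUS_OK && init)
            dmub_srv_wait_for_auto_load(dmub_srv, 100000); /* DMUB survived suspend */
    else
            dm_dmub_hw_init(adev);                         /* cold path: full init */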
@@ -1453,8 +1489,21 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
 		init_data.flags.edp_no_power_sequencing = true;

+#ifdef CONFIG_DRM_AMD_DC_DCN
+	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+	if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+		init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+#endif
+
 	init_data.flags.power_down_display_on_boot = true;

+	if (check_seamless_boot_capability(adev)) {
+		init_data.flags.power_down_display_on_boot = false;
+		init_data.flags.allow_seamless_boot_optimization = true;
+		DRM_INFO("Seamless boot condition check passed\n");
+	}
+
 	INIT_LIST_HEAD(&adev->dm.da_list);
 	/* Display Core create. */
 	adev->dm.dc = dc_create(&init_data);
@@ -1479,8 +1528,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
 		adev->dm.dc->debug.disable_stutter = true;

-	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
 		adev->dm.dc->debug.disable_dsc = true;
+		adev->dm.dc->debug.disable_dsc_edp = true;
+	}

 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
 		adev->dm.dc->debug.disable_clock_gate = true;
@@ -2303,14 +2354,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
 			goto fail;
 	}

-
-	res = dc_validate_global_state(dc, context, false);
-
-	if (res != DC_OK) {
-		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
-		goto fail;
-	}
-
 	res = dc_commit_state(dc, context);

 fail:
@@ -2561,6 +2604,23 @@ static int dm_resume(void *handle)
 	if (amdgpu_in_reset(adev)) {
 		dc_state = dm->cached_dc_state;

+		/*
+		 * The dc->current_state is backed up into dm->cached_dc_state
+		 * before we commit 0 streams.
+		 *
+		 * DC will clear link encoder assignments on the real state
+		 * but the changes won't propagate over to the copy we made
+		 * before the 0 streams commit.
+		 *
+		 * DC expects that link encoder assignments are *not* valid
+		 * when committing a state, so as a workaround it needs to be
+		 * cleared here.
+		 */
+		link_enc_cfg_init(dm->dc, dc_state);
+
+		if (dc_enable_dmub_notifications(adev->dm.dc))
+			amdgpu_dm_outbox_init(adev);
+
 		r = dm_dmub_hw_init(adev);
 		if (r)
 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -2572,20 +2632,11 @@ static int dm_resume(void *handle)
 		for (i = 0; i < dc_state->stream_count; i++) {
 			dc_state->streams[i]->mode_changed = true;

-			for (j = 0; j < dc_state->stream_status->plane_count; j++) {
-				dc_state->stream_status->plane_states[j]->update_flags.raw
+			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+				dc_state->stream_status[i].plane_states[j]->update_flags.raw
 					= 0xffffffff;
 			}
 		}
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-		/*
-		 * Resource allocation happens for link encoders for newer ASIC in
-		 * dc_validate_global_state, so we need to revalidate it.
-		 *
-		 * This shouldn't fail (it passed once before), so warn if it does.
-		 */
-		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
-#endif

 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
@@ -2608,10 +2659,12 @@ static int dm_resume(void *handle)
 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
 	dc_resource_state_construct(dm->dc, dm_state->context);

+	/* Re-enable outbox interrupts for DPIA. */
+	if (dc_enable_dmub_notifications(adev->dm.dc))
+		amdgpu_dm_outbox_init(adev);
+
 	/* Before powering on DC we need to re-initialize DMUB. */
-	r = dm_dmub_hw_init(adev);
-	if (r)
-		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+	dm_dmub_hw_resume(adev);

 	/* power on hardware */
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -2938,13 +2991,12 @@ void amdgpu_dm_update_connector_after_detect(
 			aconnector->edid =
 				(struct edid *)sink->dc_edid.raw_edid;

-			drm_connector_update_edid_property(connector,
-							   aconnector->edid);
 			if (aconnector->dc_link->aux_mode)
 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
 						    aconnector->edid);
 		}

+		drm_connector_update_edid_property(connector, aconnector->edid);
 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
 		update_connector_ext_caps(aconnector);
 	} else {
@@ -3012,7 +3064,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 		drm_modeset_unlock_all(dev);

 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
-			drm_kms_helper_hotplug_event(dev);
+			drm_kms_helper_connector_hotplug_event(connector);
 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
 		if (new_connection_type == dc_connection_none &&
@@ -3027,7 +3079,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
 		drm_modeset_unlock_all(dev);

 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
-			drm_kms_helper_hotplug_event(dev);
+			drm_kms_helper_connector_hotplug_event(connector);
 	}
 	mutex_unlock(&aconnector->hpd_lock);
@@ -3221,7 +3273,7 @@ out:
 			dm_restore_drm_connector_state(dev, connector);
 			drm_modeset_unlock_all(dev);

-			drm_kms_helper_hotplug_event(dev);
+			drm_kms_helper_connector_hotplug_event(connector);
 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

 			if (aconnector->fake_enable)
@@ -3234,7 +3286,7 @@ out:
 			dm_restore_drm_connector_state(dev, connector);
 			drm_modeset_unlock_all(dev);

-			drm_kms_helper_hotplug_event(dev);
+			drm_kms_helper_connector_hotplug_event(connector);
 		}
 	}
 #ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -3909,6 +3961,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
 	caps = dm->backlight_caps[bl_idx];

 	dm->brightness[bl_idx] = user_brightness;
+	/* update scratch register */
+	if (bl_idx == 0)
+		amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
 	link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4242,7 +4297,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
 			amdgpu_dm_update_connector_after_detect(aconnector);
 			register_backlight_device(dm, link);
-
+			if (dm->num_of_edps)
+				update_connector_ext_caps(aconnector);
 			if (psr_feature_enabled)
 				amdgpu_dm_set_psr_caps(link);
 		}
@@ -4250,6 +4306,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 	}

+	/*
+	 * Disable vblank IRQs aggressively for power-saving.
+	 *
+	 * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR
+	 * is also supported.
+	 */
+	adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;
+
 	/* Software is initialized. Now we can register interrupt handlers. */
 	switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
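Background on the hunk above: vblank_disable_immediate is a stock field of struct drm_device; when set, the DRM core turns the vblank interrupt off as soon as its reference count drops to zero instead of waiting for the disable timer, which saves power but conflicts with PSR entry timing, hence the gating on psr_feature_enabled.

    /* The one-line opt-in, as used by several DRM drivers at init time. */
    adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled;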
@@ -6034,12 +6098,74 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
 							struct dsc_dec_dpcd_caps *dsc_caps)
 {
 	stream->timing.flags.DSC = 0;
+	dsc_caps->is_dsc_supported = false;

-	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
-		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
-				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
-				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
-				      dsc_caps);
+	if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+		sink->sink_signal == SIGNAL_TYPE_EDP)) {
+		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+			sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+			dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+				aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+				dsc_caps);
+	}
+}
+
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+				    struct dc_sink *sink, struct dc_stream_state *stream,
+				    struct dsc_dec_dpcd_caps *dsc_caps,
+				    uint32_t max_dsc_target_bpp_limit_override)
+{
+	const struct dc_link_settings *verified_link_cap = NULL;
+	uint32_t link_bw_in_kbps;
+	uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+	struct dc *dc = sink->ctx->dc;
+	struct dc_dsc_bw_range bw_range = {0};
+	struct dc_dsc_config dsc_cfg = {0};
+
+	verified_link_cap = dc_link_get_link_cap(stream->link);
+	link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+	edp_min_bpp_x16 = 8 * 16;
+	edp_max_bpp_x16 = 8 * 16;
+
+	if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+		edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+	if (edp_max_bpp_x16 < edp_min_bpp_x16)
+		edp_min_bpp_x16 = edp_max_bpp_x16;
+
+	if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+				dc->debug.dsc_min_slice_height_override,
+				edp_min_bpp_x16, edp_max_bpp_x16,
+				dsc_caps,
+				&stream->timing,
+				&bw_range)) {
+
+		if (bw_range.max_kbps < link_bw_in_kbps) {
+			if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+					dsc_caps,
+					dc->debug.dsc_min_slice_height_override,
+					max_dsc_target_bpp_limit_override,
+					0,
+					&stream->timing,
+					&dsc_cfg)) {
+				stream->timing.dsc_cfg = dsc_cfg;
+				stream->timing.flags.DSC = 1;
+				stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+			}
+			return;
+		}
+	}
+
+	if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+				dsc_caps,
+				dc->debug.dsc_min_slice_height_override,
+				max_dsc_target_bpp_limit_override,
+				link_bw_in_kbps,
+				&stream->timing,
+				&dsc_cfg)) {
+		stream->timing.dsc_cfg = dsc_cfg;
+		stream->timing.flags.DSC = 1;
 	}
 }
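The reasoning in apply_dsc_policy_for_edp(): if even the top of the computed bandwidth range (the stream compressed at the maximum target bpp, here pinned to 8 bpp) fits under the verified link bandwidth, the fixed-bpp config is used; otherwise a config is searched with the link bandwidth as the ceiling. A back-of-envelope check with hypothetical numbers:

    /* 3840x2160@60 at an 8 bpp DSC target vs. a 4-lane HBR2 eDP link.
     * Pixel rate ignores blanking for brevity; 8b/10b costs 20% of raw rate.
     */
    uint64_t dsc_kbps  = 3840ULL * 2160 * 60 * 8 / 1000; /* ~3,981,312 kbps  */
    uint64_t link_kbps = 4ULL * 5400000 * 8 / 10;        /* 17,280,000 kbps  */
    bool fixed_bpp_fits = dsc_kbps < link_kbps;          /* true: take path 1 */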
@@ -6050,6 +6176,9 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 	struct drm_connector *drm_connector = &aconnector->base;
 	uint32_t link_bandwidth_kbps;
 	uint32_t max_dsc_target_bpp_limit_override = 0;
+	struct dc *dc = sink->ctx->dc;
+	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
+	uint32_t dsc_max_supported_bw_in_kbps;

 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
 							dc_link_get_link_cap(aconnector->dc_link));
@@ -6062,17 +6191,43 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 	dc_dsc_policy_set_enable_dsc_when_not_needed(
 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);

-	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp &&
+	    dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {

-		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+		apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+	} else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+		if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+			if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
 						dsc_caps,
 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
 						max_dsc_target_bpp_limit_override,
 						link_bandwidth_kbps,
 						&stream->timing,
 						&stream->timing.dsc_cfg)) {
-			stream->timing.flags.DSC = 1;
-			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+				stream->timing.flags.DSC = 1;
+				DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n",
+								 __func__, drm_connector->name);
+			}
+		} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+			timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+			max_supported_bw_in_kbps = link_bandwidth_kbps;
+			dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+			if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+					max_supported_bw_in_kbps > 0 &&
+					dsc_max_supported_bw_in_kbps > 0)
+				if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+						dsc_caps,
+						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+						max_dsc_target_bpp_limit_override,
+						dsc_max_supported_bw_in_kbps,
+						&stream->timing,
+						&stream->timing.dsc_cfg)) {
+					stream->timing.flags.DSC = 1;
+					DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+									 __func__, drm_connector->name);
+				}
 		}
 	}
@@ -8216,15 +8371,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
-		if (link->is_dig_mapping_flexible &&
-		    link->dc->res_pool->funcs->link_encs_assign) {
-			link->link_enc =
-				link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
-			if (!link->link_enc)
-				link->link_enc =
-					link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
-		}
-
+		link->link_enc = dp_get_link_enc(link);
+		ASSERT(link->link_enc);
 		if (link->link_enc)
 			aconnector->base.ycbcr_420_allowed =
 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
@@ -10615,6 +10763,8 @@ static int dm_update_plane_state(struct dc *dc,

 		dm_new_plane_state->dc_state = dc_new_plane_state;

+		dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
 		/* Tell DC to do a full surface update every time there
 		 * is a plane change. Inefficient, but works for now.
 		 */
@@ -10627,6 +10777,24 @@ static int dm_update_plane_state(struct dc *dc,
 	return ret;
 }

+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+				       int *src_w, int *src_h)
+{
+	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+	case DRM_MODE_ROTATE_90:
+	case DRM_MODE_ROTATE_270:
+		*src_w = plane_state->src_h >> 16;
+		*src_h = plane_state->src_w >> 16;
+		break;
+	case DRM_MODE_ROTATE_0:
+	case DRM_MODE_ROTATE_180:
+	default:
+		*src_w = plane_state->src_w >> 16;
+		*src_h = plane_state->src_h >> 16;
+		break;
+	}
+}
+
 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 				struct drm_crtc *crtc,
 				struct drm_crtc_state *new_crtc_state)
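On the helper just added: drm_plane_state stores src_w/src_h in 16.16 fixed point, hence the >> 16, and a 90/270-degree rotation transposes the source before scaling. A usage sketch with hypothetical values:

    /* A 64x64 cursor rotated 90 degrees and scanned out at 64x64 still
     * yields a 1.0x (1000) scale once width/height are swapped.
     */
    int src_w, src_h;

    dm_get_oriented_plane_size(new_cursor_state, &src_w, &src_h); /* 64, 64 */
    cursor_scale_w = new_cursor_state->crtc_w * 1000 / src_w;     /* 1000    */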
@@ -10635,6 +10803,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 	struct drm_plane_state *new_cursor_state, *new_underlying_state;
 	int i;
 	int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+	int cursor_src_w, cursor_src_h;
+	int underlying_src_w, underlying_src_h;

 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
 	 * cursor per pipe but it's going to inherit the scaling and
@@ -10646,10 +10816,9 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 		return 0;
 	}

-	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
-			 (new_cursor_state->src_w >> 16);
-	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
-			 (new_cursor_state->src_h >> 16);
+	dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
+	cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
+	cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;

 	for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
 		/* Narrow down to non-cursor planes on the same CRTC as the cursor */
@@ -10660,10 +10829,10 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 		if (!new_underlying_state->fb)
 			continue;

-		underlying_scale_w = new_underlying_state->crtc_w * 1000 /
-				     (new_underlying_state->src_w >> 16);
-		underlying_scale_h = new_underlying_state->crtc_h * 1000 /
-				     (new_underlying_state->src_h >> 16);
+		dm_get_oriented_plane_size(new_underlying_state,
+					   &underlying_src_w, &underlying_src_h);
+		underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
+		underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;

 		if (cursor_scale_w != underlying_scale_w ||
 		    cursor_scale_h != underlying_scale_h) {
@@ -10748,7 +10917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	enum dc_status status;
 	int ret, i;
 	bool lock_and_validation_needed = false;
-	struct dm_crtc_state *dm_old_crtc_state;
+	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
 	struct drm_dp_mst_topology_state *mst_state;
@@ -10758,8 +10927,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	trace_amdgpu_dm_atomic_check_begin(state);

 	ret = drm_atomic_helper_check_modeset(dev, state);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
 		goto fail;
+	}

 	/* Check connector changes */
 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -10775,6 +10946,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
 		if (IS_ERR(new_crtc_state)) {
+			DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
 			ret = PTR_ERR(new_crtc_state);
 			goto fail;
 		}
@@ -10789,8 +10961,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
 				ret = add_affected_mst_dsc_crtcs(state, crtc);
-				if (ret)
+				if (ret) {
+					DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
 					goto fail;
+				}
 			}
 		}
 	}
@@ -10805,19 +10979,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			continue;

 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
 			goto fail;
+		}

 		if (!new_crtc_state->enable)
 			continue;

 		ret = drm_atomic_add_affected_connectors(state, crtc);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
 			goto fail;
+		}

 		ret = drm_atomic_add_affected_planes(state, crtc);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
 			goto fail;
+		}

 		if (dm_old_crtc_state->dsc_force_changed)
 			new_crtc_state->mode_changed = true;
@@ -10854,6 +11034,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 			if (IS_ERR(new_plane_state)) {
 				ret = PTR_ERR(new_plane_state);
+				DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
 				goto fail;
 			}
 		}
@@ -10866,8 +11047,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 					    new_plane_state,
 					    false,
 					    &lock_and_validation_needed);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
 			goto fail;
+		}
 	}

 	/* Disable all crtcs which require disable */
@@ -10877,8 +11060,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 					   new_crtc_state,
 					   false,
 					   &lock_and_validation_needed);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
 			goto fail;
+		}
 	}

 	/* Enable all crtcs which require enable */
@@ -10888,8 +11073,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 					   new_crtc_state,
 					   true,
 					   &lock_and_validation_needed);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
 			goto fail;
+		}
 	}

 	/* Add new/modified planes */
@@ -10899,20 +11086,32 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 					    new_plane_state,
 					    true,
 					    &lock_and_validation_needed);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
 			goto fail;
+		}
 	}

 	/* Run this here since we want to validate the streams we created */
 	ret = drm_atomic_helper_check_planes(dev, state);
-	if (ret)
+	if (ret) {
+		DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
 		goto fail;
+	}
+
+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		if (dm_new_crtc_state->mpo_requested)
+			DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+	}

 	/* Check cursor planes scaling */
 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
 			goto fail;
+		}
 	}

 	if (state->legacy_cursor_update) {
@@ -10999,20 +11198,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	 */
 	if (lock_and_validation_needed) {
 		ret = dm_atomic_get_state(state, &dm_state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
 			goto fail;
+		}

 		ret = do_aquire_global_lock(dev, state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
 			goto fail;
+		}

 #if defined(CONFIG_DRM_AMD_DC_DCN)
-		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
+		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
 			goto fail;
+		}

 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
 			goto fail;
+		}
 #endif

 		/*
 		 * ...
 		 * to get stuck in an infinite loop and hang eventually.
 		 */
 		ret = drm_dp_mst_atomic_check(state);
-		if (ret)
+		if (ret) {
+			DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
 			goto fail;
-
-		status = dc_validate_global_state(dc, dm_state->context, false);
+		}
+		status = dc_validate_global_state(dc, dm_state->context, true);
 		if (status != DC_OK) {
-			drm_dbg_atomic(dev,
-				       "DC global validation failure: %s (%d)",
+			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
 				       dc_status_to_str(status), status);
 			ret = -EINVAL;
 			goto fail;
@@ -11149,7 +11357,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
 	input->offset = offset;
 	input->length = length;
-	input->total_length = total_length;
+	input->cea_total_length = total_length;
 	memcpy(input->payload, data, length);

 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
@@ -11456,8 +11664,10 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
 	return value;
 }

-int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
-	uint8_t status_type, uint32_t *operation_result)
+static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
+						struct dc_context *ctx,
+						uint8_t status_type,
+						uint32_t *operation_result)
 {
 	struct amdgpu_device *adev = ctx->driver_context;
 	int return_status = -1;
@@ -11528,3 +11738,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
 			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
 			(uint32_t *)operation_result);
 }
+
+/*
+ * Check whether seamless boot is supported.
+ *
+ * So far we only support seamless boot on CHIP_VANGOGH.
+ * If everything goes well, we may consider expanding
+ * seamless boot to other ASICs.
+ */
+bool check_seamless_boot_capability(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+	case CHIP_VANGOGH:
+		if (!adev->mman.keep_stolen_vga_memory)
+			return true;
+		break;
+	default:
+		break;
+	}
+
+	return false;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 37e61a88d49e..b9a69b0cef23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -50,9 +50,9 @@

 #define AMDGPU_DMUB_NOTIFICATION_MAX 5

-/**
+/*
  * DMUB Async to Sync Mechanism Status
- **/
+ */
 #define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
 #define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
 #define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
@@ -626,6 +626,8 @@ struct dm_crtc_state {
 	bool cm_has_degamma;
 	bool cm_is_degamma_srgb;

+	bool mpo_requested;
+
 	int update_type;
 	int active_planes;
@@ -731,4 +733,7 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
 					struct dc_context *ctx, unsigned int link_index,
 					void *payload, void *operation_result);
+
+bool check_seamless_boot_capability(struct amdgpu_device *adev);
+
 #endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index a022e5bb30a5..a71177305bcd 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -285,8 +285,12 @@ static int __set_input_tf(struct dc_transfer_func *func,
 }

 /**
+ * amdgpu_dm_verify_lut_sizes
+ * @crtc_state: the DRM CRTC state
+ *
  * Verifies that the Degamma and Gamma LUTs attached to the |crtc_state| are of
  * the expected size.
+ *
  * Returns 0 on success.
  */
 int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index cce062adc439..8a441a22c46e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
 			ret = -EINVAL;
 			goto cleanup;
 		}
+
+		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+				(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+			ret = -EINVAL;
+			goto cleanup;
+		}
+
 	}

 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 9d43ecb1f692..26719efa5396 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -824,6 +824,48 @@ static int dmub_fw_state_show(struct seq_file *m, void *data)
 	return seq_write(m, state_base, state_size);
 }

+/* psr_capability_show() - show eDP panel PSR capability
+ *
+ * The read function, psr_capability_show, shows whether the sink
+ * has PSR capability or not.
+ * If yes - the PSR version is appended
+ *
+ *	cat /sys/kernel/debug/dri/0/eDP-X/psr_capability
+ *
+ * Expected output:
+ * "Sink support: no\n" - if panel doesn't support PSR
+ * "Sink support: yes [0x01]\n" - if panel supports PSR1
+ * "Driver support: no\n" - if driver doesn't support PSR
+ * "Driver support: yes [0x01]\n" - if driver supports PSR1
+ */
+static int psr_capability_show(struct seq_file *m, void *data)
+{
+	struct drm_connector *connector = m->private;
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct dc_link *link = aconnector->dc_link;
+
+	if (!link)
+		return -ENODEV;
+
+	if (link->type == dc_connection_none)
+		return -ENODEV;
+
+	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+		return -ENODEV;
+
+	seq_printf(m, "Sink support: %s", yesno(link->dpcd_caps.psr_caps.psr_version != 0));
+	if (link->dpcd_caps.psr_caps.psr_version)
+		seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_caps.psr_version);
+	seq_puts(m, "\n");
+
+	seq_printf(m, "Driver support: %s", yesno(link->psr_settings.psr_feature_enabled));
+	if (link->psr_settings.psr_version)
+		seq_printf(m, " [0x%02x]", link->psr_settings.psr_version);
+	seq_puts(m, "\n");
+
+	return 0;
+}
+
 /*
  * Returns the current and maximum output bpc for the connector.
  * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -1243,7 +1285,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
 		dm_restore_drm_connector_state(dev, connector);
 		drm_modeset_unlock_all(dev);

-		drm_kms_helper_hotplug_event(dev);
+		drm_kms_helper_connector_hotplug_event(connector);
 	} else if (param[0] == 0) {
 		if (!aconnector->dc_link)
 			goto unlock;
@@ -1265,7 +1307,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
 		dm_restore_drm_connector_state(dev, connector);
 		drm_modeset_unlock_all(dev);

-		drm_kms_helper_hotplug_event(dev);
+		drm_kms_helper_connector_hotplug_event(connector);
 	}

 unlock:
@@ -2467,6 +2509,7 @@ DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
 DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
 #endif
 DEFINE_SHOW_ATTRIBUTE(internal_display);
+DEFINE_SHOW_ATTRIBUTE(psr_capability);

 static const struct file_operations dp_dsc_clock_en_debugfs_fops = {
 	.owner = THIS_MODULE,
@@ -2712,6 +2755,138 @@ static const struct {
 		{"internal_display", &internal_display_fops}
 };

+/*
+ * Returns supported customized link rates by this eDP panel.
+ * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting
+ */
+static int edp_ilr_show(struct seq_file *m, void *unused)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private);
+	struct dc_link *link = aconnector->dc_link;
+	uint8_t supported_link_rates[16];
+	uint32_t link_rate_in_khz;
+	uint32_t entry = 0;
+	uint8_t dpcd_rev;
+
+	memset(supported_link_rates, 0, sizeof(supported_link_rates));
+	dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES,
+		supported_link_rates, sizeof(supported_link_rates));
+
+	dpcd_rev = link->dpcd_caps.dpcd_rev.raw;
+
+	if (dpcd_rev >= DP_DPCD_REV_13 &&
+		(supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) {
+
+		for (entry = 0; entry < 16; entry += 2) {
+			link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
+										supported_link_rates[entry]) * 200;
+			seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
+		}
+	} else {
+		seq_printf(m, "ILR is not supported by this eDP panel.\n");
+	}
+
+	return 0;
+}
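The decode in edp_ilr_show() follows the eDP DPCD layout: DP_SUPPORTED_LINK_RATES holds up to eight little-endian 16-bit values, each in units of 200 kHz per lane. A self-contained worked example:

    /* Worked example of the ILR arithmetic above. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t raw[2] = { 0xa4, 0x1f };               /* 0x1fa4 == 8100 */
            uint32_t khz = (raw[1] * 0x100 + raw[0]) * 200;

            printf("%u kHz\n", khz); /* 1620000 kHz == 1.62 Gbps/lane (RBR) */
            return 0;
    }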
+
+/*
+ * Set supported customized link rate to eDP panel.
+ *
+ * echo <lane_count> <link_rate option> > ilr_setting
+ *
+ * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ...
+ * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting
+ * to set 4 lanes and 2.16 GHz
+ */
+static ssize_t edp_ilr_write(struct file *f, const char __user *buf,
+				 size_t size, loff_t *pos)
+{
+	struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+	struct dc_link *link = connector->dc_link;
+	struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
+	struct dc *dc = (struct dc *)link->dc;
+	struct dc_link_settings prefer_link_settings;
+	char *wr_buf = NULL;
+	const uint32_t wr_buf_size = 40;
+	/* 0: lane_count; 1: link_rate */
+	int max_param_num = 2;
+	uint8_t param_nums = 0;
+	long param[2];
+	bool valid_input = true;
+
+	if (size == 0)
+		return -EINVAL;
+
+	wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+	if (!wr_buf)
+		return -ENOMEM;
+
+	if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+					   (long *)param, buf,
+					   max_param_num,
+					   &param_nums)) {
+		kfree(wr_buf);
+		return -EINVAL;
+	}
+
+	if (param_nums <= 0) {
+		kfree(wr_buf);
+		return -EINVAL;
+	}
+
+	switch (param[0]) {
+	case LANE_COUNT_ONE:
+	case LANE_COUNT_TWO:
+	case LANE_COUNT_FOUR:
+		break;
+	default:
+		valid_input = false;
+		break;
+	}
+
+	if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count)
+		valid_input = false;
+
+	if (!valid_input) {
+		kfree(wr_buf);
+		DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
+		prefer_link_settings.use_link_rate_set = false;
+		dc_link_set_preferred_training_settings(dc, NULL, NULL, link, true);
+		return size;
+	}
+
+	/* save user force lane_count, link_rate to preferred settings
+	 * spread spectrum will not be changed
+	 */
+	prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+	prefer_link_settings.lane_count = param[0];
+	prefer_link_settings.use_link_rate_set = true;
+	prefer_link_settings.link_rate_set = param[1];
+	prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]];
+
+	mutex_lock(&adev->dm.dc_lock);
+	dc_link_set_preferred_training_settings(dc, &prefer_link_settings,
+						NULL, link, false);
+	mutex_unlock(&adev->dm.dc_lock);
+
+	kfree(wr_buf);
+	return size;
+}
+
+static int edp_ilr_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, edp_ilr_show, inode->i_private);
+}
+
+static const struct file_operations edp_ilr_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = edp_ilr_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = edp_ilr_write
+};
+
 void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 {
 	int i;
@@ -2726,11 +2901,14 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 		}
 	}
 	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+		debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
 		debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
 		debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
 				    &current_backlight_fops);
 		debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
 				    &target_backlight_fops);
+		debugfs_create_file("ilr_setting", 0644, dir, connector,
+					&edp_ilr_debugfs_fops);
 	}

 	for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
@@ -2909,10 +3087,13 @@ static int crc_win_update_set(void *data, u64 val)
 	struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
 	struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;

+	if (!crc_rd_wrk)
+		return 0;
+
 	if (val) {
 		spin_lock_irq(&adev_to_drm(adev)->event_lock);
 		spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
-		if (crc_rd_wrk && crc_rd_wrk->crtc) {
+		if (crc_rd_wrk->crtc) {
 			old_crtc = crc_rd_wrk->crtc;
 			old_acrtc = to_amdgpu_crtc(old_crtc);
 		}
@@ -3190,6 +3371,32 @@ static int disable_hpd_get(void *data, u64 *val)

 DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get,
			 disable_hpd_set, "%llu\n");

+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/*
+ * Temporary w/a to force sst sequence in M42D DP2 mst receiver
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst
+ */
+static int dp_force_sst_set(void *data, u64 val)
+{
+	struct amdgpu_device *adev = data;
+
+	adev->dm.dc->debug.set_mst_en_for_sst = val;
+
+	return 0;
+}
+
+static int dp_force_sst_get(void *data, u64 *val)
+{
+	struct amdgpu_device *adev = data;
+
+	*val = adev->dm.dc->debug.set_mst_en_for_sst;
+
+	return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get,
+			 dp_force_sst_set, "%llu\n");
+#endif
+
 /*
  * Sets the DC visual confirm debug option from the given string.
  * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm
@@ -3299,6 +3506,10 @@ void dtn_debugfs_init(struct amdgpu_device *adev)
 			    adev, &mst_topo_fops);
 	debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
 			    &dtn_log_fops);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+	debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev,
+				&dp_set_mst_en_for_sst_ops);
+#endif
 	debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
 				   &visual_confirm_fops);
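The dp_force_sst knob above follows the standard debugfs u64-attribute pattern: DEFINE_DEBUGFS_ATTRIBUTE generates file operations that route reads and writes through a get/set pair using the given printf format. A minimal sketch with a hypothetical "foo" knob:

    /* Hypothetical standalone knob, same pattern as dp_set_mst_en_for_sst_ops. */
    static u64 foo_value;

    static int foo_get(void *data, u64 *val)
    {
            *val = foo_value;
            return 0;
    }

    static int foo_set(void *data, u64 val)
    {
            foo_value = val;
            return 0;
    }
    DEFINE_DEBUGFS_ATTRIBUTE(foo_ops, foo_get, foo_set, "%llu\n");

    /* Registered with:
     * debugfs_create_file_unsafe("foo", 0644, root, NULL, &foo_ops);
     */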
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 8cbeeb7c986d..29f07c26d080 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -83,16 +83,17 @@ static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
  *	void
  * */
 enum dc_edid_status dm_helpers_parse_edid_caps(
-		struct dc_context *ctx,
+		struct dc_link *link,
 		const struct dc_edid *edid,
 		struct dc_edid_caps *edid_caps)
 {
+	struct amdgpu_dm_connector *aconnector = link->priv;
+	struct drm_connector *connector = &aconnector->base;
 	struct edid *edid_buf = (struct edid *) edid->raw_edid;
 	struct cea_sad *sads;
 	int sad_count = -1;
 	int sadb_count = -1;
 	int i = 0;
-	int j = 0;
 	uint8_t *sadb = NULL;

 	enum dc_edid_status result = EDID_OK;
@@ -111,23 +112,11 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
 	edid_caps->manufacture_week = edid_buf->mfg_week;
 	edid_caps->manufacture_year = edid_buf->mfg_year;

-	/* One of the four detailed_timings stores the monitor name. It's
-	 * stored in an array of length 13.
-	 */
-	for (i = 0; i < 4; i++) {
-		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
-			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
-				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
-					break;
-
-				edid_caps->display_name[j] =
-					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
-				j++;
-			}
-		}
-	}
+	drm_edid_get_monitor_name(edid_buf,
+				  edid_caps->display_name,
+				  AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);

-	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
-			(struct edid *) edid->raw_edid);
+	edid_caps->edid_hdmi = connector->display_info.is_hdmi;

 	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
 	if (sad_count <= 0)
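The replaced open-coded loop walked the EDID's four 18-byte detailed timing descriptors looking for tag 0xfc (display product name); the DRM core helper performs the same walk. A hedged usage sketch of that helper:

    /* drm_edid_get_monitor_name(struct edid *edid, char *name, int buflen)
     * copies the monitor name, NUL-terminated and truncated to buflen.
     */
    char name[13 + 1]; /* product-name descriptors carry at most 13 chars */

    drm_edid_get_monitor_name(edid_buf, name, sizeof(name));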
@@ -584,9 +573,18 @@ bool dm_helpers_dp_write_dsc_enable(
 		ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1);
 	}

-	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
-		ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
-		DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? "enable" : "disable");
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+#endif
+			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+			DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable");
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+		} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+			ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
+			DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable");
+		}
+#endif
 	}

 	return (ret > 0);
@@ -650,14 +648,8 @@ enum dc_edid_status dm_helpers_read_local_edid(
 		/* We don't need the original edid anymore */
 		kfree(edid);

-		/* connector->display_info will be parsed from EDID and saved
-		 * into drm_connector->display_info from edid by call stack
-		 * below:
-		 * drm_parse_ycbcr420_deep_color_info
-		 * drm_parse_hdmi_forum_vsdb
-		 * drm_parse_cea_ext
-		 * drm_add_display_info
-		 * drm_connector_update_edid_property
+		/* connector->display_info is parsed from EDID and saved
+		 * into drm_connector->display_info
 		 *
 		 * drm_connector->display_info will be used by amdgpu_dm funcs,
 		 * like fill_stream_properties_from_drm_display_mode
 		 */
 		amdgpu_dm_update_connector_after_detect(aconnector);

 		edid_status = dm_helpers_parse_edid_caps(
-						ctx,
+						link,
 						&sink->dc_edid,
 						&sink->edid_caps);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 32a5ce09a62a..cc34a35d0bcb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,6 +36,8 @@

 #include "dm_helpers.h"
 #include "dc_link_ddc.h"
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
 #include "i2caux_interface.h"
 #include "dmub_cmd.h"
@@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
 };

 #if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 ||
+	     link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+		return true;
+
+	return false;
+}
+
 static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
 {
 	struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connecto
 	u8 *dsc_branch_dec_caps = NULL;

 	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
-#if defined(CONFIG_HP_HOOK_WORKAROUND)
+
 	/*
 	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
 	 * because it only checks the dsc/fec caps of the "port variable" and not the dock
@@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connecto
 	 * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
 	 *
 	 */
-
-	if (!aconnector->dsc_aux && !port->parent->port_parent)
+	if (!aconnector->dsc_aux && !port->parent->port_parent &&
+	    needs_dsc_aux_workaround(aconnector->dc_link))
 		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
-#endif
+
 	if (!aconnector->dsc_aux)
 		return false;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index c022e56f9459..c510638b4f99 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -26,6 +26,73 @@
 #include "amdgpu_dm_psr.h"
 #include "dc.h"
 #include "dm_helpers.h"
+#include "amdgpu_dm.h"
+
+static bool link_get_psr_caps(struct dc_link *link)
+{
+	uint8_t psr_dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+	uint8_t edp_rev_dpcd_data;
+
+	if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+				    psr_dpcd_data, sizeof(psr_dpcd_data)))
+		return false;
+
+	if (!dm_helpers_dp_read_dpcd(NULL, link, DP_EDP_DPCD_REV,
+				    &edp_rev_dpcd_data, sizeof(edp_rev_dpcd_data)))
+		return false;
+
+	link->dpcd_caps.psr_caps.psr_version = psr_dpcd_data[0];
+	link->dpcd_caps.psr_caps.edp_revision = edp_rev_dpcd_data;
+
+#ifdef CONFIG_DRM_AMD_DC_DCN
+	if (link->dpcd_caps.psr_caps.psr_version > 0x1) {
+		uint8_t alpm_dpcd_data;
+		uint8_t su_granularity_dpcd_data;
+
+		if (!dm_helpers_dp_read_dpcd(NULL, link, DP_RECEIVER_ALPM_CAP,
+						&alpm_dpcd_data, sizeof(alpm_dpcd_data)))
+			return false;
+
+		if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR2_SU_Y_GRANULARITY,
+						&su_granularity_dpcd_data, sizeof(su_granularity_dpcd_data)))
+			return false;
+
+		link->dpcd_caps.psr_caps.y_coordinate_required = psr_dpcd_data[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+		link->dpcd_caps.psr_caps.su_granularity_required = psr_dpcd_data[1] & DP_PSR2_SU_GRANULARITY_REQUIRED;
+
+		link->dpcd_caps.psr_caps.alpm_cap = alpm_dpcd_data & DP_ALPM_CAP;
+		link->dpcd_caps.psr_caps.standby_support = alpm_dpcd_data & (1 << 1);
+
+		link->dpcd_caps.psr_caps.su_y_granularity = su_granularity_dpcd_data;
+	}
+#endif
+	return true;
+}
+
+#ifdef CONFIG_DRM_AMD_DC_DCN
+static bool link_supports_psrsu(struct dc_link *link)
+{
+	struct dc *dc = link->ctx->dc;
+
+	if (!dc->caps.dmcub_support)
+		return false;
+
+	if (dc->ctx->dce_version < DCN_VERSION_3_1)
+		return false;
+
+	if (!link->dpcd_caps.psr_caps.alpm_cap ||
+	    !link->dpcd_caps.psr_caps.y_coordinate_required)
+		return false;
+
+	if (link->dpcd_caps.psr_caps.su_granularity_required &&
+	    !link->dpcd_caps.psr_caps.su_y_granularity)
+		return false;
+
+	return true;
+}
+#endif

 /*
  * amdgpu_dm_set_psr_caps() - set link psr capabilities
@@ -34,26 +101,34 @@
  */
 void amdgpu_dm_set_psr_caps(struct dc_link *link)
 {
-	uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
-
 	if (!(link->connector_signal & SIGNAL_TYPE_EDP))
 		return;
+
 	if (link->type == dc_connection_none)
 		return;
-	if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
-					dpcd_data, sizeof(dpcd_data))) {
-		link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
-		if (dpcd_data[0] == 0) {
-			link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-			link->psr_settings.psr_feature_enabled = false;
-		} else {
+	if (!link_get_psr_caps(link)) {
+		DRM_ERROR("amdgpu: Failed to read PSR Caps!\n");
+		return;
+	}
+
+	if (link->dpcd_caps.psr_caps.psr_version == 0) {
+		link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+		link->psr_settings.psr_feature_enabled = false;
+
+	} else {
+#ifdef CONFIG_DRM_AMD_DC_DCN
+		if (link_supports_psrsu(link))
+			link->psr_settings.psr_version = DC_PSR_VERSION_SU_1;
+		else
+#endif
 			link->psr_settings.psr_version = DC_PSR_VERSION_1;
-			link->psr_settings.psr_feature_enabled = true;
-		}
-		DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+		link->psr_settings.psr_feature_enabled = true;
 	}
+
+	DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
+
 }

 /*
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index a4bef4364afd..1e385d55e7fb 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2995,7 +2995,7 @@ static bool bios_parser2_construct(
 		&bp->object_info_tbl.revision);

 	if (bp->object_info_tbl.revision.major == 1
-		&& bp->object_info_tbl.revision.minor >= 4) {
+		&& bp->object_info_tbl.revision.minor == 4) {
 		struct display_object_info_table_v1_4 *tbl_v1_4;

 		tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4,
@@ -3004,8 +3004,10 @@ static bool bios_parser2_construct(
 			return false;

 		bp->object_info_tbl.v1_4 = tbl_v1_4;
-	} else
+	} else {
+		ASSERT(0);
 		return false;
+	}

 	dal_firmware_parser_init_cmd_tbl(bp);
 	dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 6b248cd2a461..ec19678a0702 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -739,7 +739,9 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
 		hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
 }

-unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw_internal_rev, uint32_t pci_revision_id)
+static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
+						      uint32_t hw_internal_rev,
+						      uint32_t pci_revision_id)
 {
 	/* for low power RV2 variants, the highest voltage level we want is 0 */
 	if ((chip_family == FAMILY_RV) &&
@@ -763,7 +765,7 @@ static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family,
 	return 4;
 }

-bool dcn_validate_bandwidth(
+bool dcn10_validate_bandwidth(
 		struct dc *dc,
 		struct dc_state *context,
 		bool fast_validate)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 26f96ee32472..9200c8ce02ba 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -308,8 +308,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
 	case FAMILY_NV:
 		if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
 			dcn3_clk_mgr_destroy(clk_mgr);
-		}
-		if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
+		} else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
 			dcn3_clk_mgr_destroy(clk_mgr);
 		}
 		if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
index 76ec8ec92efd..60761ff3cbf1 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c
@@ -34,7 +34,7 @@
 #include "rv1_clk_mgr_vbios_smu.h"
 #include "rv1_clk_mgr_clk.h"

-void rv1_init_clocks(struct clk_mgr *clk_mgr)
+static void rv1_init_clocks(struct clk_mgr *clk_mgr)
 {
 	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
 }
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index fe18bb9e19aa..06bab24d8e27 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -28,6 +28,8 @@
 #include "reg_helper.h"
 #include <linux/delay.h>

+#include "rv1_clk_mgr_vbios_smu.h"
+
 #define MAX_INSTANCE	5
 #define MAX_SEGMENT		5
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 2108bff49d4e..cac80ba69072 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -38,7 +38,6 @@
 #include "clk/clk_11_0_0_offset.h"
 #include "clk/clk_11_0_0_sh_mask.h"

-#include "irq/dcn20/irq_service_dcn20.h"

 #undef FN
 #define FN(reg_name, field_name) \
@@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	bool force_reset = false;
 	bool p_state_change_support;
 	int total_plane_count;
-	int irq_src;
-	uint32_t hpd_state;

 	if (dc->work_arounds.skip_clock_update)
 		return;
@@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
 	if (dc->res_pool->pp_smu)
 		pp_smu = &dc->res_pool->pp_smu->nv_funcs;

-	for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
-		hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
-		if (hpd_state)
-			break;
-	}
-
-	if (display_count == 0 && !hpd_state)
+	if (display_count == 0)
 		enter_display_off = true;

 	if (enter_display_off == safe_to_lower) {
@@ -409,7 +400,7 @@ void dcn2_init_clocks(struct clk_mgr *clk_mgr)
 	clk_mgr->clks.prev_p_state_change_support = true;
 }

-void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+static void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
 {
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	struct pp_smu_funcs_nv *pp_smu = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
index db9950244c7b..fbdd0a92d146 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -74,42 +74,6 @@ static const struct clk_mgr_mask clk_mgr_mask = {
 	CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK)
 };

-void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr,
-			struct dc_state *context,
-			bool safe_to_lower)
-{
-	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
-
-	bool update_dppclk = false;
-	bool update_dispclk = false;
-
-	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
-		clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
-		update_dppclk = true;
-	}
-
-	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
-		clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
-		update_dispclk = true;
-	}
-
-	if (update_dppclk || update_dispclk) {
-		struct bp_set_dce_clock_parameters dce_clk_params;
-		struct dc_bios *bp = clk_mgr->ctx->dc_bios;
-
-		if (update_dispclk) {
-			memset(&dce_clk_params, 0, sizeof(dce_clk_params));
-			dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz;
-			dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
-			dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
-			bp->funcs->set_dce_clock(bp, &dce_clk_params);
-		}
-		/* currently there is no DCECLOCK_TYPE_DPPCLK type defined in VBIOS interface.
-		 * vbios program DPPCLK to the same DispCLK limitation
-		 */
-	}
-}
-
 static void dcn201_init_clocks(struct clk_mgr *clk_mgr)
 {
 	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
@@ -126,10 +90,8 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
 	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
 	struct dc *dc = clk_mgr_base->ctx->dc;
-	int display_count;
 	bool update_dppclk = false;
 	bool update_dispclk = false;
-	bool enter_display_off = false;
 	bool dpp_clock_lowered = false;
 	bool force_reset = false;
 	bool p_state_change_support;
@@ -145,10 +107,7 @@ static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
 		dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
 	}

-	display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
-
-	if (display_count == 0)
-		enter_display_off = true;
+	clk_mgr_helper_get_active_display_cnt(dc, context);

 	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
 		clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ac2d4c4f04e4..f4dee0e48a67 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -42,7 +42,6 @@
 #include "clk/clk_10_0_2_sh_mask.h"
 #include "renoir_ip_offset.h"

-#include "irq/dcn21/irq_service_dcn21.h"

 /* Constants */
@@ -56,9 +55,7 @@

 /* TODO: evaluate how to lower or disable all dcn clocks in screen off case */

-int rn_get_active_display_cnt_wa(
-		struct dc *dc,
-		struct dc_state *context)
+static int rn_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context)
 {
 	int i, display_count;
 	bool tmds_present = false;
@@ -77,7 +74,8 @@ int rn_get_active_display_cnt_wa(
 		const struct dc_link *link = dc->links[i];

 		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
-		if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
+		if (link->link_enc->funcs->is_dig_enabled &&
+		    link->link_enc->funcs->is_dig_enabled(link->link_enc))
 			display_count++;
 	}
@@ -88,7 +86,7 @@ int rn_get_active_display_cnt_wa(
 	return display_count;
 }

-void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)
+static void rn_set_low_power_state(struct clk_mgr *clk_mgr_base)  {  	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -122,7 +120,7 @@ static void rn_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,  } -void rn_update_clocks(struct clk_mgr *clk_mgr_base, +static void rn_update_clocks(struct clk_mgr *clk_mgr_base,  			struct dc_state *context,  			bool safe_to_lower)  { @@ -130,11 +128,9 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,  	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;  	struct dc *dc = clk_mgr_base->ctx->dc;  	int display_count; -	int irq_src;  	bool update_dppclk = false;  	bool update_dispclk = false;  	bool dpp_clock_lowered = false; -	uint32_t hpd_state;  	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; @@ -151,14 +147,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,  			display_count = rn_get_active_display_cnt_wa(dc, context); -			for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) { -				hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src); -				if (hpd_state) -					break; -			} -  			/* if we can go lower, go lower */ -			if (display_count == 0 && !hpd_state) { +			if (display_count == 0) {  				rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);  				/* update power state */  				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -437,25 +427,14 @@ static void rn_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an  	}  } -/* This function produce translated logical clk state values*/ -void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) -{ -	struct clk_state_registers_and_bypass sb = { 0 }; -	struct clk_log_info log_info = { 0 }; - -	rn_dump_clk_registers(&sb, clk_mgr_base, &log_info); - -	s->dprefclk_khz = sb.dprefclk * 1000; -} - -void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) +static void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base)  {  	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);  	rn_vbios_smu_enable_pme_wa(clk_mgr);  } -void rn_init_clocks(struct clk_mgr *clk_mgr) +static void rn_init_clocks(struct clk_mgr *clk_mgr)  {  	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));  	// Assumption is that boot state always supports pstate diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 9f7eed6688c4..8161a6ae410d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,6 +33,8 @@  #include "mp/mp_12_0_0_offset.h"  #include "mp/mp_12_0_0_sh_mask.h" +#include "rn_clk_mgr_vbios_smu.h" +  #define REG(reg_name) \  	(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) @@ -86,7 +88,9 @@ static uint32_t rn_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, unsig  } -int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned int msg_id, unsigned int param) +static int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, +					    unsigned int msg_id, +					    unsigned int param)  {  	uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 1861a147a7fa..f977f29907df 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -252,6 +252,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,  	bool update_dispclk = false;  	bool enter_display_off = false;  	bool dpp_clock_lowered = false; +	bool update_pstate_unsupported_clk = false;  	struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;  	bool force_reset = false;  	bool update_uclk = false; @@ -299,13 +300,28 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,  	clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;  	total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);  	p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0); -	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + +	// invalidate the current P-State forced min in certain dc_mode_softmax situations +	if (dc->clk_mgr->dc_mode_softmax_enabled && safe_to_lower && !p_state_change_support) { +		if ((new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) != +				(clk_mgr_base->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)) +			update_pstate_unsupported_clk = true; +	} + +	if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support) || +			update_pstate_unsupported_clk) {  		clk_mgr_base->clks.p_state_change_support = p_state_change_support;  		/* to disable P-State switching, set UCLK min = max */ -		if (!clk_mgr_base->clks.p_state_change_support) -			dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, +		if (!clk_mgr_base->clks.p_state_change_support) { +			if (dc->clk_mgr->dc_mode_softmax_enabled && +				new_clocks->dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) +				dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, +					dc->clk_mgr->bw_params->dc_mode_softmax_memclk); +			else +				dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,  					clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz); +		}  	}  	/* Always update saved value, even if new value not set due to P-State switching unsupported */ @@ -421,6 +437,24 @@ static void dcn3_set_hard_max_memclk(struct clk_mgr *clk_mgr_base)  			clk_mgr_base->bw_params->clk_table.entries[clk_mgr_base->bw_params->clk_table.num_entries - 1].memclk_mhz);  } +static void dcn3_set_max_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ +	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + +	if (!clk_mgr->smu_present) +		return; + +	dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} +static void dcn3_set_min_memclk(struct clk_mgr *clk_mgr_base, unsigned int memclk_mhz) +{ +	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + +	if (!clk_mgr->smu_present) +		return; +	dcn30_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, memclk_mhz); +} +  /* Get current memclk states, update bounding box */  static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)  { @@ -436,6 +470,8 @@ static void dcn3_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base)  			&num_levels);  	clk_mgr_base->bw_params->clk_table.num_entries = num_levels ? 
num_levels : 1; +	clk_mgr_base->bw_params->dc_mode_softmax_memclk = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_UCLK); +  	/* Refresh bounding box */  	clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(  			clk_mgr->base.ctx->dc, clk_mgr_base->bw_params); @@ -505,6 +541,8 @@ static struct clk_mgr_funcs dcn3_funcs = {  		.notify_wm_ranges = dcn3_notify_wm_ranges,  		.set_hard_min_memclk = dcn3_set_hard_min_memclk,  		.set_hard_max_memclk = dcn3_set_hard_max_memclk, +		.set_max_memclk = dcn3_set_max_memclk, +		.set_min_memclk = dcn3_set_min_memclk,  		.get_memclk_states_from_smu = dcn3_get_memclk_states_from_smu,  		.are_clock_states_equal = dcn3_are_clock_states_equal,  		.enable_pme_wa = dcn3_enable_pme_wa, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c index 6ea642615854..d9920d91838d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c @@ -88,9 +88,9 @@ static uint32_t dcn301_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, u  	return res_val;  } -int dcn301_smu_send_msg_with_param( -		struct clk_mgr_internal *clk_mgr, -		unsigned int msg_id, unsigned int param) +static int dcn301_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, +					  unsigned int msg_id, +					  unsigned int param)  {  	uint32_t result; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c index 3eee32faa208..48005def1164 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c @@ -89,9 +89,9 @@ static int vg_get_active_display_cnt_wa(  	return display_count;  } -void vg_update_clocks(struct clk_mgr *clk_mgr_base, -			struct dc_state *context, -			bool safe_to_lower) +static void vg_update_clocks(struct clk_mgr *clk_mgr_base, +			     struct dc_state *context, +			     bool safe_to_lower)  {  	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);  	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; @@ -367,18 +367,6 @@ static void vg_dump_clk_registers(struct clk_state_registers_and_bypass *regs_an  	}  } -/* This function produce translated logical clk state values*/ -void vg_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) -{ - -	struct clk_state_registers_and_bypass sb = { 0 }; -	struct clk_log_info log_info = { 0 }; - -	vg_dump_clk_registers(&sb, clk_mgr_base, &log_info); - -	s->dprefclk_khz = sb.dprefclk * 1000; -} -  static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)  {  	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); @@ -386,7 +374,7 @@ static void vg_enable_pme_wa(struct clk_mgr *clk_mgr_base)  	dcn301_smu_enable_pme_wa(clk_mgr);  } -void vg_init_clocks(struct clk_mgr *clk_mgr) +static void vg_init_clocks(struct clk_mgr *clk_mgr)  {  	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));  	// Assumption is that boot state always supports pstate @@ -753,7 +741,7 @@ void vg_clk_mgr_construct(  				sizeof(struct watermarks),  				&clk_mgr->smu_wm_set.mc_address.quad_part); -	if (clk_mgr->smu_wm_set.wm_set == 0) { +	if (!clk_mgr->smu_wm_set.wm_set) {  		clk_mgr->smu_wm_set.wm_set = &dummy_wms;  		clk_mgr->smu_wm_set.mc_address.quad_part = 0;  	} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c 
index f4c9a458ace8..4162ce40089b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -66,7 +66,7 @@  #define TO_CLK_MGR_DCN31(clk_mgr)\  	container_of(clk_mgr, struct clk_mgr_dcn31, base) -int dcn31_get_active_display_cnt_wa( +static int dcn31_get_active_display_cnt_wa(  		struct dc *dc,  		struct dc_state *context)  { @@ -118,7 +118,7 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)  	}  } -static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, +void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,  			struct dc_state *context,  			bool safe_to_lower)  { @@ -158,6 +158,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,  				union display_idle_optimization_u idle_info = { 0 };  				idle_info.idle_info.df_request_disabled = 1;  				idle_info.idle_info.phy_ref_clk_off = 1; +				idle_info.idle_info.s0i2_rdy = 1;  				dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);  				/* update power state */  				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; @@ -284,7 +285,7 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)  	dcn31_smu_enable_pme_wa(clk_mgr);  } -static void dcn31_init_clocks(struct clk_mgr *clk_mgr) +void dcn31_init_clocks(struct clk_mgr *clk_mgr)  {  	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));  	// Assumption is that boot state always supports pstate @@ -294,7 +295,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr)  	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;  } -static bool dcn31_are_clock_states_equal(struct dc_clocks *a, +bool dcn31_are_clock_states_equal(struct dc_clocks *a,  		struct dc_clocks *b)  {  	if (a->dispclk_khz != b->dispclk_khz) @@ -540,10 +541,9 @@ static unsigned int find_clk_for_voltage(  	return clock;  } -void dcn31_clk_mgr_helper_populate_bw_params( -		struct clk_mgr_internal *clk_mgr, -		struct integrated_info *bios_info, -		const DpmClocks_t *clock_table) +static void dcn31_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr, +						    struct integrated_info *bios_info, +						    const DpmClocks_t *clock_table)  {  	int i, j;  	struct clk_bw_params *bw_params = clk_mgr->base.bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index f8f100535526..961b10a49486 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -39,6 +39,13 @@ struct clk_mgr_dcn31 {  	struct dcn31_smu_watermark_set smu_wm_set;  }; +bool dcn31_are_clock_states_equal(struct dc_clocks *a, +		struct dc_clocks *b); +void dcn31_init_clocks(struct clk_mgr *clk_mgr); +void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, +			struct dc_state *context, +			bool safe_to_lower); +  void dcn31_clk_mgr_construct(struct dc_context *ctx,  		struct clk_mgr_dcn31 *clk_mgr,  		struct pp_smu_funcs *pp_smu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 8c2b77eb9459..a1011f3273f3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -95,9 +95,9 @@ static uint32_t dcn31_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un  	return res_val;  } -int dcn31_smu_send_msg_with_param( -		struct clk_mgr_internal *clk_mgr, -		
unsigned int msg_id, unsigned int param) +static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, +					 unsigned int msg_id, +					 unsigned int param)  {  	uint32_t result; @@ -119,6 +119,12 @@ int dcn31_smu_send_msg_with_param(  	result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); +	if (result == VBIOSSMC_Result_Failed) { +		ASSERT(0); +		REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); +		return -1; +	} +  	if (IS_SMU_TIMEOUT(result)) {  		ASSERT(0);  		dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0ded4decee05..01c8849b9db2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -221,9 +221,9 @@ static bool create_links(  		link = link_create(&link_init_params);  		if (link) { -				dc->links[dc->link_count] = link; -				link->dc = dc; -				++dc->link_count; +			dc->links[dc->link_count] = link; +			link->dc = dc; +			++dc->link_count;  		}  	} @@ -274,24 +274,6 @@ static bool create_links(  			goto failed_alloc;  		} -#if defined(CONFIG_DRM_AMD_DC_DCN) -		if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && -				dc->caps.dp_hpo && -				link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) { -			/* FPGA case - Allocate HPO DP link encoder */ -			if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) { -				link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i]; - -				if (link->hpo_dp_link_enc == NULL) { -					BREAK_TO_DEBUGGER(); -					goto failed_alloc; -				} -				link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source; -				link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter; -			} -		} -#endif -  		link->link_status.dpcd_caps = &link->dpcd_caps;  		enc_init.ctx = dc->ctx; @@ -808,6 +790,10 @@ void dc_stream_set_static_screen_params(struct dc *dc,  static void dc_destruct(struct dc *dc)  { +	// reset link encoder assignment table on destruct +	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign) +		link_enc_cfg_init(dc, dc->current_state); +  	if (dc->current_state) {  		dc_release_state(dc->current_state);  		dc->current_state = NULL; @@ -1016,8 +1002,6 @@ static bool dc_construct(struct dc *dc,  		goto fail;  	} -	dc_resource_state_construct(dc, dc->current_state); -  	if (!create_links(dc, init_params->num_virtual_links))  		goto fail; @@ -1027,8 +1011,7 @@ static bool dc_construct(struct dc *dc,  	if (!create_link_encoders(dc))  		goto fail; -	/* Initialise DIG link encoder resource tracking variables. */ -	link_enc_cfg_init(dc, dc->current_state); +	dc_resource_state_construct(dc, dc->current_state);  	return true; @@ -1830,6 +1813,19 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)  		dc_stream_log(dc, stream);  	} +	/* +	 * Previous validation was performed with fast_validation = true and +	 * the full DML state required for hardware programming was skipped. +	 * +	 * Re-validate here to calculate these parameters / watermarks. +	 */ +	result = dc_validate_global_state(dc, context, false); +	if (result != DC_OK) { +		DC_LOG_ERROR("DC commit global validation failure: %s (%d)", +			     dc_status_to_str(result), result); +		return false; +	} +  	result = dc_commit_state_no_check(dc, context);  	return (result == DC_OK); 
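A minimal sketch of the resulting two-phase validation flow (illustrative only, not part of the patch; the caller shown is schematic and the early-return policy is an assumption):

	enum dc_status status;
	bool committed;

	/* at atomic-check time: cheap pass only, full DML state is skipped */
	status = dc_validate_global_state(dc, context, true /* fast_validation */);
	if (status != DC_OK)
		return status;

	/* at commit time: dc_commit_state() itself re-validates with
	 * fast_validation = false to compute watermarks before programming
	 * the hardware */
	committed = dc_commit_state(dc, context);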
@@ -2870,7 +2866,8 @@ static void commit_planes_for_stream(struct dc *dc,  #endif  	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) -		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { +		if (top_pipe_to_program && +			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {  			if (should_use_dmub_lock(stream->link)) {  				union dmub_hw_lock_flags hw_locks = { 0 };  				struct dmub_hw_lock_inst_flags inst_flags = { 0 }; @@ -2979,12 +2976,12 @@ static void commit_planes_for_stream(struct dc *dc,  #ifdef CONFIG_DRM_AMD_DC_DCN  		if (dc->debug.validate_dml_output) {  			for (i = 0; i < dc->res_pool->pipe_count; i++) { -				struct pipe_ctx cur_pipe = context->res_ctx.pipe_ctx[i]; -				if (cur_pipe.stream == NULL) +				struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i]; +				if (cur_pipe->stream == NULL)  					continue; -				cur_pipe.plane_res.hubp->funcs->validate_dml_output( -						cur_pipe.plane_res.hubp, dc->ctx, +				cur_pipe->plane_res.hubp->funcs->validate_dml_output( +						cur_pipe->plane_res.hubp, dc->ctx,  						&context->res_ctx.pipe_ctx[i].rq_regs,  						&context->res_ctx.pipe_ctx[i].dlg_regs,  						&context->res_ctx.pipe_ctx[i].ttu_regs); @@ -3426,7 +3423,7 @@ struct dc_sink *dc_link_add_remote_sink(  		goto fail_add_sink;  	edid_status = dm_helpers_parse_edid_caps( -			link->ctx, +			link,  			&dc_sink->dc_edid,  			&dc_sink->edid_caps); @@ -3583,6 +3580,98 @@ void dc_lock_memory_clock_frequency(struct dc *dc)  			core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);  } +static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz) +{ +	struct dc_state *context = dc->current_state; +	struct hubp *hubp; +	struct pipe_ctx *pipe; +	int i; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->stream != NULL) { +			dc->hwss.disable_pixel_data(dc, pipe, true); + +			// wait for double buffer +			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); +			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK); +			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE); + +			hubp = pipe->plane_res.hubp; +			hubp->funcs->set_blank_regs(hubp, true); +		} +	} + +	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz); +	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz); + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->stream != NULL) { +			dc->hwss.disable_pixel_data(dc, pipe, false); + +			hubp = pipe->plane_res.hubp; +			hubp->funcs->set_blank_regs(hubp, false); +		} +	} +} + + 
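The function documented below is the DM-facing entry point for the clamp built on blank_and_force_memclk() above. A minimal usage sketch, assuming a hypothetical DM-side power-source hook (the hook name and signature are not part of this patch):

	/* Hypothetical DM hook -- illustrative only. */
	static void dm_on_power_source_change(struct amdgpu_device *adev, bool on_battery)
	{
		/* may briefly blank displays when P-State switching is
		 * unsupported; see the kernel-doc below */
		dc_enable_dcmode_clk_limit(adev->dm.dc, on_battery);
	}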
+/** + * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode + * @dc: pointer to dc of the dm calling this + * @enable: true = transition to DC mode, false = transition back to AC mode + * + * Some SoCs define additional clock limits when in DC (battery) mode; DM + * should invoke this function when the platform undergoes a power source + * transition so DC can apply or remove the limit. This interface may be + * disruptive to the onscreen content. + * + * Context: Triggered by the OS through the DM interface, or manually by + * escape calls. The caller needs to hold the dc lock when doing so. + * + * Return: none (void function) + * + */ +void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable) +{ +	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev; +	unsigned int softMax, maxDPM, funcMin; +	bool p_state_change_support; + +	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev)) +		return; + +	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk; +	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz; +	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000; +	p_state_change_support = dc->clk_mgr->clks.p_state_change_support; + +	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) { +		if (p_state_change_support) { +			if (funcMin <= softMax) +				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax); +			// else: No-Op +		} else { +			if (funcMin <= softMax) +				blank_and_force_memclk(dc, true, softMax); +			// else: No-Op +		} +	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) { +		if (p_state_change_support) { +			if (funcMin <= softMax) +				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM); +			// else: No-Op +		} else { +			if (funcMin <= softMax) +				blank_and_force_memclk(dc, true, maxDPM); +			// else: No-Op +		} +	} +	dc->clk_mgr->dc_mode_softmax_enabled = enable; +}  bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,  		struct dc_cursor_attributes *cursor_attr)  { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 21be2a684393..643762542e4d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -422,6 +422,8 @@ char *dc_status_to_str(enum dc_status status)  		return "The operation is not supported.";  	case DC_UNSUPPORTED_VALUE:  		return "The value specified is not supported."; +	case DC_NO_LINK_ENC_RESOURCE: +		return "No link encoder resource";  	case DC_ERROR_UNEXPECTED:  		return "Unexpected error";  	} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 60544788e911..b5e570d33ca9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -66,31 +66,6 @@  /*******************************************************************************   * Private functions   ******************************************************************************/ -#if defined(CONFIG_DRM_AMD_DC_DCN) -static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link) -{ -	struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder( -					link->dc->res_pool); - -	if (!link->hpo_dp_link_enc && enc) { -		link->hpo_dp_link_enc = enc; -		link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter; -		link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source; -	} - -	return (link->hpo_dp_link_enc != NULL); -} - -static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link) -{ -	if (link->hpo_dp_link_enc) { -		link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN; -		link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN; -		link->hpo_dp_link_enc = NULL; -	} -} -#endif -  static void dc_link_destruct(struct dc_link *link)  {  	int i; @@ -118,12 +93,6 @@ static void dc_link_destruct(struct dc_link *link)  		link->link_enc->funcs->destroy(&link->link_enc);  	} -#if 
defined(CONFIG_DRM_AMD_DC_DCN) -	if (link->hpo_dp_link_enc) { -		remove_dp_hpo_link_encoder_from_link(link); -	} -#endif -  	if (link->local_sink)  		dc_sink_release(link->local_sink); @@ -270,10 +239,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)  	/* Link may not have physical HPD pin. */  	if (link->ep_type != DISPLAY_ENDPOINT_PHY) { -		if (link->hpd_status) -			*type = dc_connection_single; -		else +		if (link->is_hpd_pending || !link->hpd_status)  			*type = dc_connection_none; +		else +			*type = dc_connection_single;  		return true;  	} @@ -758,6 +727,18 @@ static bool detect_dp(struct dc_link *link,  			dal_ddc_service_set_transaction_type(link->ddc,  							     sink_caps->transaction_type); +#if defined(CONFIG_DRM_AMD_DC_DCN) +			/* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock +			 * reports DSC support. +			 */ +			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && +					link->type == dc_connection_mst_branch && +					link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && +					link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && +					!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) +				link->wa_flags.dpia_mst_dsc_always_on = true; +#endif +  #if defined(CONFIG_DRM_AMD_DC_HDCP)  			/* In case of fallback to SST when topology discovery below fails  			 * HDCP caps will be querried again later by the upper layer (caller @@ -869,6 +850,7 @@ static bool dc_link_detect_helper(struct dc_link *link,  	enum dc_connection_type pre_connection_type = dc_connection_none;  	bool perform_dp_seamless_boot = false;  	const uint32_t post_oui_delay = 30; // 30ms +	struct link_resource link_res = { 0 };  	DC_LOGGER_INIT(link->ctx->logger); @@ -963,7 +945,10 @@ static bool dc_link_detect_helper(struct dc_link *link,  #if defined(CONFIG_DRM_AMD_DC_DCN)  			if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING) -				add_dp_hpo_link_encoder_to_link(link); +				link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt( +						&link->dc->current_state->res_ctx, +						link->dc->res_pool, +						link);  #endif  			if (link->type == dc_connection_mst_branch) { @@ -974,7 +959,7 @@ static bool dc_link_detect_helper(struct dc_link *link,  				 * empty which leads to allocate_mst_payload() has "0"  				 * pbn_per_slot value leading to exception on dc_fixpt_div()  				 */ -				dp_verify_mst_link_cap(link); +				dp_verify_mst_link_cap(link, &link_res);  				/*  				 * This call will initiate MST topology discovery. Which @@ -1138,6 +1123,7 @@ static bool dc_link_detect_helper(struct dc_link *link,  			// verify link cap for SST non-seamless boot  			if (!perform_dp_seamless_boot)  				dp_verify_link_cap_with_retries(link, +								&link_res,  								&link->reported_link_cap,  								LINK_TRAINING_MAX_VERIFY_RETRY);  		} else { @@ -1203,6 +1189,10 @@ static bool dc_link_detect_helper(struct dc_link *link,  			LINK_INFO("link=%d, mst branch is now Disconnected\n",  				  link->link_index); +			/* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. 
*/ +			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +				link->wa_flags.dpia_mst_dsc_always_on = false; +  			dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);  			link->mst_stream_alloc_table.stream_count = 0; @@ -1828,6 +1818,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)  		union down_spread_ctrl old_downspread;  		union down_spread_ctrl new_downspread; +		memset(&old_downspread, 0, sizeof(old_downspread)); +  		core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,  				&old_downspread.raw, sizeof(old_downspread)); @@ -1999,6 +1991,57 @@ static enum dc_status enable_link_dp_mst(  	return enable_link_dp(state, pipe_ctx);  } +void dc_link_blank_all_dp_displays(struct dc *dc) +{ +	unsigned int i; +	uint8_t dpcd_power_state = '\0'; +	enum dc_status status = DC_ERROR_UNEXPECTED; + +	for (i = 0; i < dc->link_count; i++) { +		if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) || +			(dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL)) +			continue; + +		/* DP 2.0 spec requires that we read LTTPR caps first */ +		dp_retrieve_lttpr_cap(dc->links[i]); +		/* if any of the displays are lit up turn them off */ +		status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, +							&dpcd_power_state, sizeof(dpcd_power_state)); + +		if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) +			dc_link_blank_dp_stream(dc->links[i], true); +	} + +} + +void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init) +{ +	unsigned int j; +	struct dc  *dc = link->ctx->dc; +	enum signal_type signal = link->connector_signal; + +	if ((signal == SIGNAL_TYPE_EDP) || +		(signal == SIGNAL_TYPE_DISPLAY_PORT)) { +		if (link->ep_type == DISPLAY_ENDPOINT_PHY && +			link->link_enc->funcs->get_dig_frontend && +			link->link_enc->funcs->is_dig_enabled(link->link_enc)) { +			unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + +			if (fe != ENGINE_ID_UNKNOWN) +				for (j = 0; j < dc->res_pool->stream_enc_count; j++) { +					if (fe == dc->res_pool->stream_enc[j]->id) { +						dc->res_pool->stream_enc[j]->funcs->dp_blank(link, +									dc->res_pool->stream_enc[j]); +						break; +					} +				} +		} + +		if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) +			dp_receiver_power_ctrl(link, false); +	} +} +  static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,  		enum engine_id eng_id,  		struct ext_hdmi_settings *settings) @@ -2436,7 +2479,8 @@ static void write_i2c_redriver_setting(  		DC_LOG_DEBUG("Set redriver failed");  } -static void disable_link(struct dc_link *link, enum signal_type signal) +static void disable_link(struct dc_link *link, const struct link_resource *link_res, +		enum signal_type signal)  {  	/*  	 * TODO: implement call for dp_set_hw_test_pattern @@ -2455,20 +2499,20 @@ static void disable_link(struct dc_link *link, enum signal_type signal)  		struct dc_link_settings link_settings = link->cur_link_settings;  #endif  		if (dc_is_dp_sst_signal(signal)) -			dp_disable_link_phy(link, signal); +			dp_disable_link_phy(link, link_res, signal);  		else -			dp_disable_link_phy_mst(link, signal); +			dp_disable_link_phy_mst(link, link_res, signal);  		if (dc_is_dp_sst_signal(signal) ||  				link->mst_stream_alloc_table.stream_count == 0) {  #if defined(CONFIG_DRM_AMD_DC_DCN)  			if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {  				dp_set_fec_enable(link, false); -				dp_set_fec_ready(link, false); +				dp_set_fec_ready(link, link_res, false);  			}  #else  			dp_set_fec_enable(link, false); -			
dp_set_fec_ready(link, false); +			dp_set_fec_ready(link, link_res, false);  #endif  		}  	} else { @@ -2579,7 +2623,7 @@ static enum dc_status enable_link(  	 * new link settings.  	 */  	if (link->link_status.link_active) { -		disable_link(link, pipe_ctx->stream->signal); +		disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);  	}  	switch (pipe_ctx->stream->signal) { @@ -2699,8 +2743,23 @@ static bool dp_active_dongle_validate_timing(  		return false;  	} +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter +		struct dc_crtc_timing outputTiming = *timing; + +		if (timing->flags.DSC && !timing->dsc_cfg.is_frl) +			/* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ +			outputTiming.flags.DSC = 0; +		if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) +			return false; +	} else { // DP to HDMI TMDS converter +		if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) +			return false; +	} +#else  	if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))  		return false; +#endif  #if defined(CONFIG_DRM_AMD_DC_DCN)  	} @@ -2946,7 +3005,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active  		link->psr_settings.psr_power_opt = *power_opts;  		if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) -			psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt); +			psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);  	}  	/* Enable or Disable PSR */ @@ -3334,11 +3393,12 @@ static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_tim  /*   * Payload allocation/deallocation for SST introduced in DP2.0   */ -enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate) +static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, +						 bool allocate)  {  	struct dc_stream_state *stream = pipe_ctx->stream;  	struct dc_link *link = stream->link; -	struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; +	struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;  	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;  	struct link_mst_stream_allocation_table proposed_table = {0};  	struct fixed31_32 avg_time_slots_per_mtp; @@ -3420,7 +3480,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)  	struct link_encoder *link_encoder = NULL;  	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;  #if defined(CONFIG_DRM_AMD_DC_DCN) -	struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; +	struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;  	struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;  #endif  	struct dp_mst_stream_allocation_table proposed_table = {0}; @@ -3750,7 +3810,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)  	struct link_encoder *link_encoder = NULL;  	struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;  #if defined(CONFIG_DRM_AMD_DC_DCN) -	struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc; +	struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc;  	struct 
hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;  #endif  	struct dp_mst_stream_allocation_table proposed_table = {0}; @@ -3911,112 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)  static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)  {  	struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; -#if defined(CONFIG_DRM_AMD_DC_DCN)  	struct link_encoder *link_enc = NULL; -	struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state; -	struct link_enc_assignment link_enc_assign; -	int i; -#endif +	struct cp_psp_stream_config config = {0}; +	enum dp_panel_mode panel_mode = +			dp_get_panel_mode(pipe_ctx->stream->link); -	if (cp_psp && cp_psp->funcs.update_stream_config) { -		struct cp_psp_stream_config config = {0}; -		enum dp_panel_mode panel_mode = -				dp_get_panel_mode(pipe_ctx->stream->link); +	if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) +		return; -		config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; -		/*stream_enc_inst*/ -		config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; -		config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; -#if defined(CONFIG_DRM_AMD_DC_DCN) -		config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; -		 -		if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY || -				pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { -			link_enc = pipe_ctx->stream->link->link_enc; -			config.dio_output_type = pipe_ctx->stream->link->ep_type; -			config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; -			if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) -				link_enc = pipe_ctx->stream->link->link_enc; -			else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) -				if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { -					link_enc = link_enc_cfg_get_link_enc_used_by_stream( -							pipe_ctx->stream->ctx->dc, -							pipe_ctx->stream); -			} -			// Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 -			config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; +	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY) +		link_enc = pipe_ctx->stream->link->link_enc; +	else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && +			pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) +		link_enc = link_enc_cfg_get_link_enc_used_by_stream( +				pipe_ctx->stream->ctx->dc, +				pipe_ctx->stream); +	ASSERT(link_enc); +	if (link_enc == NULL) +		return; -			//look up the link_enc_assignment for the current pipe_ctx -			for (i = 0; i < state->stream_count; i++) { -				if (pipe_ctx->stream == state->streams[i]) { -					link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; -				} -			} -			// Add flag to guard new A0 DIG mapping -			if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) { -				config.dig_be = link_enc_assign.eng_id; -				config.dio_output_type = pipe_ctx->stream->link->ep_type; -				config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; -			} else { -				config.dio_output_type = 0; -				config.dio_output_idx = 0; -			} +	/* otg instance */ +	config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; -			// Add flag to guard B0 implementation -			if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && -					link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { -				if (pipe_ctx->stream->link->ep_type == 
DISPLAY_ENDPOINT_USB4_DPIA) { -					link_enc = link_enc_assign.stream->link_enc; +	/* dig front end */ +	config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; -					// enum ID 1-4 maps to DPIA PHY ID 0-3 -					config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1; -				} else {  // for non DPIA mode over B0, ABCDE maps to 01564 +	/* stream encoder index */ +	config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (is_dp_128b_132b_signal(pipe_ctx)) +		config.stream_enc_idx = +				pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; +#endif -					switch (link_enc->transmitter) { -					case TRANSMITTER_UNIPHY_A: -						config.phy_idx = 0; -						break; -					case TRANSMITTER_UNIPHY_B: -						config.phy_idx = 1; -						break; -					case TRANSMITTER_UNIPHY_C: -						config.phy_idx = 5; -						break; -					case TRANSMITTER_UNIPHY_D: -						config.phy_idx = 6; -						break; -					case TRANSMITTER_UNIPHY_E: -						config.phy_idx = 4; -						break; -					default: -						config.phy_idx = 0; -						break; -					} +	/* dig back end */ +	config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; -				} -			} -		} else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) { -			link_enc = link_enc_cfg_get_link_enc_used_by_stream( -					pipe_ctx->stream->ctx->dc, -					pipe_ctx->stream); -			config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */ -		} -		ASSERT(link_enc); -		if (link_enc) -			config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; -		if (is_dp_128b_132b_signal(pipe_ctx)) { -			config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; -			config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst; -			config.dp2_enabled = 1; -		} +	/* link encoder index */ +	config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (is_dp_128b_132b_signal(pipe_ctx)) +		config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;  #endif -		config.dpms_off = dpms_off; -		config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; -		config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP); -		config.mst_enabled = (pipe_ctx->stream->signal == -				SIGNAL_TYPE_DISPLAY_PORT_MST); -		cp_psp->funcs.update_stream_config(cp_psp->handle, &config); -	} +	/* dio output index */ +	config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + +	/* phy index */ +	config.phy_idx = resource_transmitter_to_phy_idx( +			pipe_ctx->stream->link->dc, link_enc->transmitter); +	if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) +		/* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ +		config.phy_idx = 0; + +	/* stream properties */ +	config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; +	config.mst_enabled = (pipe_ctx->stream->signal == +			SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; +#if defined(CONFIG_DRM_AMD_DC_DCN) +	config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; +#endif +	config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
+			1 : 0; +	config.dpms_off = dpms_off; + +	/* dm stream context */ +	config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + +	cp_psp->funcs.update_stream_config(cp_psp->handle, &config);  }  #endif @@ -4037,7 +4058,7 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi  	stream->link->cur_link_settings = link_settings;  	/*  Enable clock, Configure lane count, and Enable Link Encoder*/ -	enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings); +	enable_dp_hpo_output(stream->link, &pipe_ctx->link_res, &stream->link->cur_link_settings);  #ifdef DIAGS_BUILD  	/* Workaround for FPGA HPO capture DP link data: @@ -4087,12 +4108,12 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi  		proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;  	} -	stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table( -			stream->link->hpo_dp_link_enc, +	pipe_ctx->link_res.hpo_dp_link_enc->funcs->update_stream_allocation_table( +			pipe_ctx->link_res.hpo_dp_link_enc,  			&proposed_table); -	stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size( -			stream->link->hpo_dp_link_enc, +	pipe_ctx->link_res.hpo_dp_link_enc->funcs->set_throttled_vcp_size( +			pipe_ctx->link_res.hpo_dp_link_enc,  			pipe_ctx->stream_res.hpo_dp_stream_enc->inst,  			avg_time_slots_per_mtp); @@ -4242,7 +4263,8 @@ void core_link_enable_stream(  		/* eDP lit up by bios already, no need to enable again. */  		if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&  					apply_edp_fast_boot_optimization && -					!pipe_ctx->stream->timing.flags.DSC) { +					!pipe_ctx->stream->timing.flags.DSC && +					!pipe_ctx->next_odm_pipe) {  			pipe_ctx->stream->dpms_off = false;  #if defined(CONFIG_DRM_AMD_DC_HDCP)  			update_psp_stream_config(pipe_ctx, false); @@ -4280,7 +4302,8 @@ void core_link_enable_stream(  			if (status != DC_FAIL_DP_LINK_TRAINING ||  					pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {  				if (false == stream->link->link_status.link_active) -					disable_link(stream->link, pipe_ctx->stream->signal); +					disable_link(stream->link, &pipe_ctx->link_res, +							pipe_ctx->stream->signal);  				BREAK_TO_DEBUGGER();  				return;  			} @@ -4429,14 +4452,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)  		 * state machine.  		 
* In DP2 or MST mode, our encoder will stay video active  		 */ -		disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); +		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);  		dc->hwss.disable_stream(pipe_ctx);  	} else {  		dc->hwss.disable_stream(pipe_ctx); -		disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); +		disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);  	}  #else -	disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); +	disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);  	dc->hwss.disable_stream(pipe_ctx);  #endif @@ -4519,16 +4542,22 @@ void dc_link_set_drive_settings(struct dc *dc,  {  	int i; +	struct pipe_ctx *pipe = NULL; +	const struct link_resource *link_res; -	for (i = 0; i < dc->link_count; i++) { -		if (dc->links[i] == link) -			break; -	} +	link_res = dc_link_get_cur_link_res(link); -	if (i >= dc->link_count) +	for (i = 0; i < MAX_PIPES; i++) { +		pipe = &dc->current_state->res_ctx.pipe_ctx[i]; +		if (pipe->stream && pipe->stream->link) { +			if (pipe->stream->link == link) +				break; +		} +	} +	if (pipe && link_res) +		dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings); +	else  		ASSERT_CRITICAL(false); - -	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);  }  void dc_link_set_preferred_link_settings(struct dc *dc, @@ -4589,11 +4618,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc,  	if (link_setting != NULL) {  		link->preferred_link_setting = *link_setting;  #if defined(CONFIG_DRM_AMD_DC_DCN) -		if (dp_get_link_encoding_format(link_setting) == -				DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) { -			if (!add_dp_hpo_link_encoder_to_link(link)) -				memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting)); -		} +		if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING) +			/* TODO: add dc update for acquiring link res  */ +			skip_immediate_retrain = true;  #endif  	} else {  		link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; @@ -4720,6 +4747,9 @@ void dc_link_overwrite_extended_receiver_cap(  bool dc_link_is_fec_supported(const struct dc_link *link)  { +	/* TODO - use asic cap instead of link_enc->features +	 * we no longer know which link enc to use for this link before commit +	 */  	struct link_encoder *link_enc = NULL;  	/* Links supporting dynamically assigned link encoder will be assigned next @@ -4749,6 +4779,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)  			link->local_sink &&  			link->local_sink->edid_caps.panel_patch.disable_fec) ||  			(link->connector_signal == SIGNAL_TYPE_EDP +				// enable FEC for EDP if DSC is supported +				&& link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false  				))  		is_fec_disable = true; @@ -4812,3 +4844,125 @@ uint32_t dc_bandwidth_in_kbps_from_timing(  	return kbps;  } + +const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) +{ +	int i; +	struct pipe_ctx *pipe = NULL; +	const struct link_resource *link_res = NULL; + +	for (i = 0; i < MAX_PIPES; i++) { +		pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; +		if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { +			if (pipe->stream->link == link) { +				link_res = &pipe->link_res; +				break; +			} +		} +	} + +	return link_res; +} + +/** + * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state + * @dc: pointer to dc of 
the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to capture a snapshot of the current link resource allocation + * mapping and store it in its persistent storage. + * + * Some of the link resources are allocated on a first come, first served + * basis. The allocation mapping depends on the original hotplug order, and + * this information is lost the next time the driver is loaded. The snapshot + * is used to restore the link resources to their previous state so the user + * gets consistent link capability allocation across reboots. + * + * Return: none (void function) + * + */ +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) +	struct dc_link *link; +	uint8_t i; +	uint32_t hpo_dp_recycle_map = 0; + +	*map = 0; + +	if (dc->caps.dp_hpo) { +		for (i = 0; i < dc->caps.max_links; i++) { +			link = dc->links[i]; +			if (link->link_status.link_active && +					dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && +					dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) +				/* the hpo dp link encoder is considered recycled when the RX +				 * reports 128b/132b encoding capability but the current link +				 * doesn't use it. +				 */ +				hpo_dp_recycle_map |= (1 << i); +		} +		*map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); +	} +#endif +} + 
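dc_get_cur_link_res_map() above and dc_restore_link_res_map() below are intended as a save/restore pair across driver reloads. A minimal DM-side sketch, assuming hypothetical persistent-storage helpers dm_save_u32()/dm_load_u32() (not part of this patch):

	uint32_t map;

	/* before teardown: snapshot the current allocation */
	dc_get_cur_link_res_map(dc, &map);
	dm_save_u32("dc_link_res_map", map);

	/* next boot: after initial link detection and before the first
	 * stream commit */
	map = dm_load_u32("dc_link_res_map");
	dc_restore_link_res_map(dc, &map);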
+/** + * dc_restore_link_res_map() - restore link resource allocation state from a snapshot + * @dc: pointer to dc of the dm calling this + * @map: a dc link resource snapshot defined internally to dc. + * + * DM needs to call this function after initial link detection on boot and + * before the first stream commit to restore the link resource allocation + * state from the previous boot session. + * + * Some of the link resources are allocated on a first come, first served + * basis. The allocation mapping depends on the original hotplug order, and + * this information is lost the next time the driver is loaded. The snapshot + * is used to restore the link resources to their previous state so the user + * gets consistent link capability allocation across reboots. + * + * Return: none (void function) + * + */ +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) +	struct dc_link *link; +	uint8_t i; +	unsigned int available_hpo_dp_count; +	uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) +			>> LINK_RES_HPO_DP_REC_MAP__SHIFT; + +	if (dc->caps.dp_hpo) { +		available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count; +		/* remove excess 128b/132b encoding support for non-recycled links */ +		for (i = 0; i < dc->caps.max_links; i++) { +			if ((hpo_dp_recycle_map & (1 << i)) == 0) { +				link = dc->links[i]; +				if (link->type != dc_connection_none && +						dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { +					if (available_hpo_dp_count > 0) +						available_hpo_dp_count--; +					else +						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ +						link->verified_link_cap.link_rate = LINK_RATE_HIGH3; +				} +			} +		} +		/* remove excess 128b/132b encoding support for recycled links */ +		for (i = 0; i < dc->caps.max_links; i++) { +			if ((hpo_dp_recycle_map & (1 << i)) != 0) { +				link = dc->links[i]; +				if (link->type != dc_connection_none && +						dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { +					if (available_hpo_dp_count > 0) +						available_hpo_dp_count--; +					else +						/* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ +						link->verified_link_cap.link_rate = LINK_RATE_HIGH3; +				} +			} +		} +	} +#endif +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 60539b1f2a80..24dc662ec3e4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -626,7 +626,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,  	do {  		struct aux_payload current_payload;  		bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= -				payload->length ? true : false; +				payload->length;  		uint32_t payload_length = is_end_of_payload ? 
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cb7bf9148904..05e216524370 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -100,6 +100,7 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {  #endif  static bool decide_fallback_link_setting( +		struct dc_link *link,  		struct dc_link_settings initial_link_settings,  		struct dc_link_settings *current_link_setting,  		enum link_training_result training_result); @@ -398,6 +399,223 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)  }  #endif +static void vendor_specific_lttpr_wa_one_start(struct dc_link *link) +{ +	const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff}; +	const uint8_t offset = dp_convert_to_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	uint32_t vendor_lttpr_write_address = 0xF004F; + +	if (offset != 0xFF) +		vendor_lttpr_write_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +	/* W/A for certain LTTPR to reset their lane settings, part one of two */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data[0], +			sizeof(vendor_lttpr_write_data)); +} + +static void vendor_specific_lttpr_wa_one_end( +	struct dc_link *link, +	uint8_t retry_count) +{ +	const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0x0}; +	const uint8_t offset = dp_convert_to_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	uint32_t vendor_lttpr_write_address = 0xF004F; + +	if (!retry_count) { +		if (offset != 0xFF) +			vendor_lttpr_write_address += +					((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +		/* W/A for certain LTTPR to reset their lane settings, part two of two */ +		core_link_write_dpcd( +				link, +				vendor_lttpr_write_address, +				&vendor_lttpr_write_data[0], +				sizeof(vendor_lttpr_write_data)); +	} +} + +static void vendor_specific_lttpr_wa_one_two( +	struct dc_link *link, +	const uint8_t rate) +{ +	if (link->apply_vendor_specific_lttpr_link_rate_wa) { +		uint8_t toggle_rate = 0x0; + +		if (rate == 0x6) +			toggle_rate = 0xA; +		else +			toggle_rate = 0x6; + +		if (link->vendor_specific_lttpr_link_rate_wa == rate) { +			/* W/A for certain LTTPR to reset internal state for link training */ +			core_link_write_dpcd( +					link, +					DP_LINK_BW_SET, +					&toggle_rate, +					1); +		} + +		/* Store the last attempted link rate for this link */ +		link->vendor_specific_lttpr_link_rate_wa = rate; +	} +} + +static void vendor_specific_lttpr_wa_three( +	struct dc_link *link, +	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ +	const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; +	const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; +	const uint8_t offset = dp_convert_to_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	uint32_t vendor_lttpr_write_address = 0xF004F; +	uint32_t vendor_lttpr_read_address = 0xF0053; +	uint8_t dprx_vs = 0; +	uint8_t dprx_pe = 0; +	uint8_t lane; + +	if (offset != 0xFF) { +		vendor_lttpr_write_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +		vendor_lttpr_read_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +	} + +	/* W/A to read lane settings requested by DPRX */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			
&vendor_lttpr_write_data_vs[0], +			sizeof(vendor_lttpr_write_data_vs)); +	core_link_read_dpcd( +			link, +			vendor_lttpr_read_address, +			&dprx_vs, +			1); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_pe[0], +			sizeof(vendor_lttpr_write_data_pe)); +	core_link_read_dpcd( +			link, +			vendor_lttpr_read_address, +			&dprx_pe, +			1); + +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE = (dprx_vs >> (2 * lane)) & 0x3; +		dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE = (dprx_pe >> (2 * lane)) & 0x3; +	} +} + +static void vendor_specific_lttpr_wa_three_dpcd( +	struct dc_link *link, +	union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ +	union lane_adjust lane_adjust[LANE_COUNT_DP_MAX]; +	uint8_t lane = 0; + +	vendor_specific_lttpr_wa_three(link, lane_adjust); + +	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { +		dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = lane_adjust[lane].bits.VOLTAGE_SWING_LANE; +		dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = lane_adjust[lane].bits.PRE_EMPHASIS_LANE; +	} +} + +static void vendor_specific_lttpr_wa_four( +	struct dc_link *link, +	bool apply_wa) +{ +	const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8}; +	const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0}; +	const uint8_t offset = dp_convert_to_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	uint32_t vendor_lttpr_write_address = 0xF004F; +#if defined(CONFIG_DRM_AMD_DC_DP2_0) +	uint8_t sink_status = 0; +	uint8_t i; +#endif + +	if (offset != 0xFF) +		vendor_lttpr_write_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + +	/* W/A to pass through DPCD write of TPS=0 to DPRX */ +	if (apply_wa) { +		core_link_write_dpcd( +				link, +				vendor_lttpr_write_address, +				&vendor_lttpr_write_data_one[0], +				sizeof(vendor_lttpr_write_data_one)); +	} + +	/* clear training pattern set */ +	dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + +	if (apply_wa) { +		core_link_write_dpcd( +				link, +				vendor_lttpr_write_address, +				&vendor_lttpr_write_data_two[0], +				sizeof(vendor_lttpr_write_data_two)); +	} + +#if defined(CONFIG_DRM_AMD_DC_DP2_0) +	/* poll for intra-hop disable */ +	for (i = 0; i < 10; i++) { +		if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && +				(sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) +			break; +		udelay(1000); +	} +#endif +} + +static void vendor_specific_lttpr_wa_five( +	struct dc_link *link, +	const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], +	uint8_t lane_count) +{ +	const uint32_t vendor_lttpr_write_address = 0xF004F; +	const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; +	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; +	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; +	uint8_t lane = 0; + +	for (lane = 0; lane < lane_count; lane++) { +		vendor_lttpr_write_data_vs[3] |= +				dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); +		vendor_lttpr_write_data_pe[3] |= +				dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); +	} + +	/* Force LTTPR to output desired VS and PE */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_reset[0], +			sizeof(vendor_lttpr_write_data_reset)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_vs[0], +			
sizeof(vendor_lttpr_write_data_vs)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_pe[0], +			sizeof(vendor_lttpr_write_data_pe)); +} +  enum dc_status dpcd_set_link_settings(  	struct dc_link *link,  	const struct link_training_settings *lt_settings) @@ -430,7 +648,7 @@ enum dc_status dpcd_set_link_settings(  	status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,  		&lane_count_set.raw, 1); -	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && +	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&  			lt_settings->link_settings.use_link_rate_set == true) {  		rate = 0;  		/* WA for some MUX chips that will power down with eDP and lose supported @@ -452,6 +670,15 @@ enum dc_status dpcd_set_link_settings(  #else  		rate = (uint8_t) (lt_settings->link_settings.link_rate);  #endif +		if (link->dc->debug.apply_vendor_specific_lttpr_wa && +					(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +					link->lttpr_mode == LTTPR_MODE_TRANSPARENT) +			vendor_specific_lttpr_wa_one_start(link); + +		if (link->dc->debug.apply_vendor_specific_lttpr_wa && +					(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) +			vendor_specific_lttpr_wa_one_two(link, rate); +  		status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);  	} @@ -1024,6 +1251,7 @@ bool dp_is_max_vs_reached(  static bool perform_post_lt_adj_req_sequence(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct link_training_settings *lt_settings)  {  	enum dc_lane_count lane_count = @@ -1087,6 +1315,7 @@ static bool perform_post_lt_adj_req_sequence(  						lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);  				dc_link_dp_set_drive_settings(link, +						link_res,  						lt_settings);  				break;  			} @@ -1161,6 +1390,7 @@ enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,  static enum link_training_result perform_channel_equalization_sequence(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct link_training_settings *lt_settings,  	uint32_t offset)  { @@ -1183,12 +1413,12 @@ static enum link_training_result perform_channel_equalization_sequence(  		tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;  #endif -	dp_set_hw_training_pattern(link, tr_pattern, offset); +	dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);  	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;  		retries_ch_eq++) { -		dp_set_hw_lane_settings(link, lt_settings, offset); +		dp_set_hw_lane_settings(link, link_res, lt_settings, offset);  		/* 2. update DPCD*/  		if (!retries_ch_eq) @@ -1211,6 +1441,12 @@ static enum link_training_result perform_channel_equalization_sequence(  					dp_translate_training_aux_read_interval(  						link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); +		if (link->dc->debug.apply_vendor_specific_lttpr_wa && +				(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +				link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { +			wait_time_microsec = 16000; +		} +  		dp_wait_for_training_aux_rd_interval(  				link,  				wait_time_microsec); @@ -1246,18 +1482,20 @@ static enum link_training_result perform_channel_equalization_sequence(  }  static void start_clock_recovery_pattern_early(struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings,  		uint32_t offset)  {  	DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. 
Wait 400us.\n",  			__func__); -	dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset); -	dp_set_hw_lane_settings(link, lt_settings, offset); +	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); +	dp_set_hw_lane_settings(link, link_res, lt_settings, offset);  	udelay(400);  }  static enum link_training_result perform_clock_recovery_sequence(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct link_training_settings *lt_settings,  	uint32_t offset)  { @@ -1273,7 +1511,7 @@ static enum link_training_result perform_clock_recovery_sequence(  	retry_count = 0;  	if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) -		dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, offset); +		dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);  	/* najeeb - The synaptics MST hub can put the LT in  	* infinite loop by switching the VS @@ -1290,6 +1528,7 @@ static enum link_training_result perform_clock_recovery_sequence(  		/* 1. call HWSS to set lane settings*/  		dp_set_hw_lane_settings(  				link, +				link_res,  				lt_settings,  				offset); @@ -1311,8 +1550,10 @@ static enum link_training_result perform_clock_recovery_sequence(  		/* 3. wait receiver to lock-on*/  		wait_time_microsec = lt_settings->cr_pattern_time; -		if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) -			wait_time_microsec = TRAINING_AUX_RD_INTERVAL; +		if (link->dc->debug.apply_vendor_specific_lttpr_wa && +				(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) { +			wait_time_microsec = 16000; +		}  		dp_wait_for_training_aux_rd_interval(  				link, @@ -1329,6 +1570,13 @@ static enum link_training_result perform_clock_recovery_sequence(  				dpcd_lane_adjust,  				offset); +		if (link->dc->debug.apply_vendor_specific_lttpr_wa && +				(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +				link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { +			vendor_specific_lttpr_wa_one_end(link, retry_count); +			vendor_specific_lttpr_wa_three(link, dpcd_lane_adjust); +		} +  		/* 5. check CR done*/  		if (dp_is_cr_done(lane_count, dpcd_lane_status))  			return LINK_TRAINING_SUCCESS; @@ -1379,13 +1627,14 @@ static enum link_training_result perform_clock_recovery_sequence(  static inline enum link_training_result dp_transition_to_video_idle(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct link_training_settings *lt_settings,  	enum link_training_result status)  {  	union lane_count_set lane_count_set = {0};  	/* 4. mainlink output idle pattern*/ -	dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); +	dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);  	/*  	 * 5. 
post training adjust if required @@ -1409,7 +1658,7 @@ static inline enum link_training_result dp_transition_to_video_idle(  	}  	if (status == LINK_TRAINING_SUCCESS && -		perform_post_lt_adj_req_sequence(link, lt_settings) == false) +		perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)  		status = LINK_TRAINING_LQA_FAIL;  	lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; @@ -1852,10 +2101,11 @@ static void print_status_message(  void dc_link_dp_set_drive_settings(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct link_training_settings *lt_settings)  {  	/* program ASIC PHY settings*/ -	dp_set_hw_lane_settings(link, lt_settings, DPRX); +	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);  	dp_hw_to_dpcd_lane_settings(lt_settings,  			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); @@ -1866,6 +2116,7 @@ void dc_link_dp_set_drive_settings(  bool dc_link_dp_perform_link_training_skip_aux(  	struct dc_link *link, +	const struct link_resource *link_res,  	const struct dc_link_settings *link_setting)  {  	struct link_training_settings lt_settings = {0}; @@ -1882,10 +2133,10 @@ bool dc_link_dp_perform_link_training_skip_aux(  	/* 1. Perform_clock_recovery_sequence. */  	/* transmit training pattern for clock recovery */ -	dp_set_hw_training_pattern(link, lt_settings.pattern_for_cr, DPRX); +	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);  	/* call HWSS to set lane settings*/ -	dp_set_hw_lane_settings(link, &lt_settings, DPRX); +	dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);  	/* wait receiver to lock-on*/  	dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); @@ -1893,10 +2144,10 @@ bool dc_link_dp_perform_link_training_skip_aux(  	/* 2. Perform_channel_equalization_sequence. */  	/* transmit training pattern for channel equalization. */ -	dp_set_hw_training_pattern(link, lt_settings.pattern_for_eq, DPRX); +	dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);  	/* call HWSS to set lane settings*/ -	dp_set_hw_lane_settings(link, &lt_settings, DPRX); +	dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);  	/* wait receiver to lock-on. */  	dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); @@ -1904,7 +2155,7 @@ bool dc_link_dp_perform_link_training_skip_aux(  	/* 3. Perform_link_training_int. */
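 	/* Note: with AUX access disabled there is no way to read back lane
 	 * status, so steps 1 and 2 above drove pre-computed settings with
 	 * fixed waits rather than running a CR/EQ convergence loop.
 	 */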
 	/* Mainlink output idle pattern. */ -	dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); +	dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);  	print_status_message(link, &lt_settings, LINK_TRAINING_SUCCESS); @@ -1985,6 +2236,7 @@ static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,  static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(  		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings)  {  	uint8_t loop_count; @@ -1996,7 +2248,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(  	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};  	/* Transmit 128b/132b_TPS1 over Main-Link */ -	dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX); +	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);  	/* Set TRAINING_PATTERN_SET to 01h */  	dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); @@ -2006,8 +2258,8 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(  			&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);  	dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,  			lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); -	dp_set_hw_lane_settings(link, lt_settings, DPRX); -	dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX); +	dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); +	dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);  	/* Set loop counter to start from 1 */  	loop_count = 1; @@ -2034,7 +2286,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(  		} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {  			status = DP_128b_132b_LT_FAILED;  		} else { -			dp_set_hw_lane_settings(link, lt_settings, DPRX); +			dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);  			dpcd_set_lane_settings(link, lt_settings, DPRX);  		}  		loop_count++; @@ -2063,6 +2315,7 @@ static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(  static enum link_training_result dp_perform_128b_132b_cds_done_sequence(  		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings)  {  	/* Assumption: assume hardware has transmitted eq pattern */ @@ -2099,6 +2352,7 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence(  static enum link_training_result dp_perform_8b_10b_link_training(  		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings)  {  	enum link_training_result status = LINK_TRAINING_SUCCESS; @@ -2108,7 +2362,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(  	uint8_t lane = 0;  	if (link->ctx->dc->work_arounds.lt_early_cr_pattern) -		start_clock_recovery_pattern_early(link, lt_settings, DPRX); +		start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
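 	/* When the lt_early_cr_pattern workaround is active, TPS1 and the
 	 * initial lane settings have already been transmitted above, giving
 	 * the receiver its 400us to lock before the DPCD programming below.
 	 */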
 	/* 1. set link rate, lane count and spread. */  	dpcd_set_link_settings(link, lt_settings); @@ -2122,12 +2376,13 @@ static enum link_training_result dp_perform_8b_10b_link_training(  		for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);  				repeater_id--) { -			status = perform_clock_recovery_sequence(link, lt_settings, repeater_id); +			status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);  			if (status != LINK_TRAINING_SUCCESS)  				break;  			status = perform_channel_equalization_sequence(link, +					link_res,  					lt_settings,  					repeater_id); @@ -2138,13 +2393,14 @@ static enum link_training_result dp_perform_8b_10b_link_training(  		}  		for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) -			lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0; +			lt_settings->dpcd_lane_settings[lane].raw = 0;  	}  	if (status == LINK_TRAINING_SUCCESS) { -		status = perform_clock_recovery_sequence(link, lt_settings, DPRX); +		status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);  	if (status == LINK_TRAINING_SUCCESS) {  		status = perform_channel_equalization_sequence(link, +					link_res,  					lt_settings,  					DPRX);  		} @@ -2156,6 +2412,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(  #if defined(CONFIG_DRM_AMD_DC_DCN)  static enum link_training_result dp_perform_128b_132b_link_training(  		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings)  {  	enum link_training_result result = LINK_TRAINING_SUCCESS; @@ -2167,23 +2424,358 @@ static enum link_training_result dp_perform_128b_132b_link_training(  		decide_8b_10b_training_settings(link,  				&lt_settings->link_settings,  				&legacy_settings); -		return dp_perform_8b_10b_link_training(link, &legacy_settings); +		return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);  	}  	dpcd_set_link_settings(link, lt_settings);  	if (result == LINK_TRAINING_SUCCESS) -		result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings); +		result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);  	if (result == LINK_TRAINING_SUCCESS) -		result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings); +		result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);  	return result;  }  #endif +static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence( +	struct dc_link *link, +	const struct link_resource *link_res, +	struct link_training_settings *lt_settings) +{ +	const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; +	const uint8_t offset = dp_convert_to_count( +			link->dpcd_caps.lttpr_caps.phy_repeater_cnt); +	const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; +	const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; +	uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; +	uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; +	uint32_t vendor_lttpr_write_address = 0xF004F; +	enum link_training_result status = LINK_TRAINING_SUCCESS; +	uint8_t lane = 0; +	union down_spread_ctrl downspread = {0}; +	union lane_count_set lane_count_set = {0}; +	uint8_t toggle_rate; +	uint8_t rate; + +	/* Only 8b/10b is supported */ +	ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) == +			DP_8b_10b_ENCODING); + +	if (offset != 0xFF) { +		vendor_lttpr_write_address += +				((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); +	}
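+	/* The vendor-specific register block sits at DPCD 0xF004F; a repeater
+	 * target advances the address by one
+	 * DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE stride per hop beyond
+	 * the first (offset - 1), while an offset of 0xFF leaves the base
+	 * address untouched.
+	 */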
+ +	/* Vendor specific: Reset lane settings */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_reset[0], +			sizeof(vendor_lttpr_write_data_reset)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_vs[0], +			sizeof(vendor_lttpr_write_data_vs)); +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_pe[0], +			sizeof(vendor_lttpr_write_data_pe)); + +	/* Vendor specific: Enable intercept */ +	core_link_write_dpcd( +			link, +			vendor_lttpr_write_address, +			&vendor_lttpr_write_data_intercept_en[0], +			sizeof(vendor_lttpr_write_data_intercept_en)); + +	/* 1. set link rate, lane count and spread. */ + +	downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); + +	lane_count_set.bits.LANE_COUNT_SET = +	lt_settings->link_settings.lane_count; + +	lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; +	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + +	if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { +		lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = +				link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; +	} + +	core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, +		&downspread.raw, sizeof(downspread)); + +	core_link_write_dpcd(link, DP_LANE_COUNT_SET, +		&lane_count_set.raw, 1); + +#if defined(CONFIG_DRM_AMD_DC_DCN) +	rate = get_dpcd_link_rate(&lt_settings->link_settings); +#else +	rate = (uint8_t) (lt_settings->link_settings.link_rate); +#endif + +	/* Vendor specific: Toggle link rate */ +	toggle_rate = (rate == 0x6) ? 0xA : 0x6; + +	if (link->vendor_specific_lttpr_link_rate_wa == rate) { +		core_link_write_dpcd( +				link, +				DP_LINK_BW_SET, +				&toggle_rate, +				1); +	} + +	link->vendor_specific_lttpr_link_rate_wa = rate; + +	core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + +	DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", +		__func__, +		DP_LINK_BW_SET, +		lt_settings->link_settings.link_rate, +		DP_LANE_COUNT_SET, +		lt_settings->link_settings.lane_count, +		lt_settings->enhanced_framing, +		DP_DOWNSPREAD_CTRL, +		lt_settings->link_settings.link_spread); + +	/* 2. Perform link training */ + +	/* Perform Clock Recovery Sequence */ +	if (status == LINK_TRAINING_SUCCESS) { +		uint32_t retries_cr; +		uint32_t retry_count; +		uint32_t wait_time_microsec; +		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; +		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; +		union lane_align_status_updated dpcd_lane_status_updated; +		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + +		retries_cr = 0; +		retry_count = 0; + +		while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && +			(retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + +			memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); +			memset(&dpcd_lane_status_updated, '\0', +			sizeof(dpcd_lane_status_updated)); + +			/* 1. call HWSS to set lane settings */ +			dp_set_hw_lane_settings( +					link, +					link_res, +					lt_settings, +					0); + +			/* 2. update DPCD of the receiver */ +			if (!retry_count) { +				/* EPR #361076 - write as a 5-byte burst, +				 * but only for the 1-st iteration. 
+				 */ +				dpcd_set_lt_pattern_and_lane_settings( +						link, +						lt_settings, +						lt_settings->pattern_for_cr, +						0); +				/* Vendor specific: Disable intercept */ +				core_link_write_dpcd( +						link, +						vendor_lttpr_write_address, +						&vendor_lttpr_write_data_intercept_dis[0], +						sizeof(vendor_lttpr_write_data_intercept_dis)); +			} else { +				vendor_lttpr_write_data_vs[3] = 0; +				vendor_lttpr_write_data_pe[3] = 0; + +				for (lane = 0; lane < lane_count; lane++) { +					vendor_lttpr_write_data_vs[3] |= +							lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); +					vendor_lttpr_write_data_pe[3] |= +							lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); +				} + +				/* Vendor specific: Update VS and PE to DPRX requested value */ +				core_link_write_dpcd( +						link, +						vendor_lttpr_write_address, +						&vendor_lttpr_write_data_vs[0], +						sizeof(vendor_lttpr_write_data_vs)); +				core_link_write_dpcd( +						link, +						vendor_lttpr_write_address, +						&vendor_lttpr_write_data_pe[0], +						sizeof(vendor_lttpr_write_data_pe)); + +				dpcd_set_lane_settings( +						link, +						lt_settings, +						0); +			} + +			/* 3. wait receiver to lock-on*/ +			wait_time_microsec = lt_settings->cr_pattern_time; + +			dp_wait_for_training_aux_rd_interval( +					link, +					wait_time_microsec); + +			/* 4. Read lane status and requested drive +			 * settings as set by the sink +			 */ +			dp_get_lane_status_and_lane_adjust( +					link, +					lt_settings, +					dpcd_lane_status, +					&dpcd_lane_status_updated, +					dpcd_lane_adjust, +					0); + +			/* 5. check CR done*/ +			if (dp_is_cr_done(lane_count, dpcd_lane_status)) { +				status = LINK_TRAINING_SUCCESS; +				break; +			} + +			/* 6. max VS reached*/ +			if (dp_is_max_vs_reached(lt_settings)) +				break; + +			/* 7. same lane settings */ +			/* Note: settings are the same for all lanes, +			 * so comparing first lane is sufficient +			 */ +			if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == +					dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) +				retries_cr++; +			else +				retries_cr = 0; + +			/* 8. update VS/PE/PC2 in lt_settings*/ +			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +			retry_count++; +		} + +		if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { +			ASSERT(0); +			DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", +				__func__, +				LINK_TRAINING_MAX_CR_RETRY); + +		} + +		status = dp_get_cr_failure(lane_count, dpcd_lane_status); +	} + +	/* Perform Channel EQ Sequence */ +	if (status == LINK_TRAINING_SUCCESS) { +		enum dc_dp_training_pattern tr_pattern; +		uint32_t retries_ch_eq; +		uint32_t wait_time_microsec; +		enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; +		union lane_align_status_updated dpcd_lane_status_updated = {0}; +		union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; +		union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + +		/* Note: also check that TPS4 is a supported feature*/ +		tr_pattern = lt_settings->pattern_for_eq; + +		dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); + +		status = LINK_TRAINING_EQ_FAIL_EQ; + +		for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; +			retries_ch_eq++) { + +			dp_set_hw_lane_settings(link, link_res, lt_settings, 0); + +			vendor_lttpr_write_data_vs[3] = 0; +			vendor_lttpr_write_data_pe[3] = 0; + +			for (lane = 0; lane < lane_count; lane++) { +				vendor_lttpr_write_data_vs[3] |= +						lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); +				vendor_lttpr_write_data_pe[3] |= +						lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); +			} + +			/* Vendor specific: Update VS and PE to DPRX requested value */ +			core_link_write_dpcd( +					link, +					vendor_lttpr_write_address, +					&vendor_lttpr_write_data_vs[0], +					sizeof(vendor_lttpr_write_data_vs)); +			core_link_write_dpcd( +					link, +					vendor_lttpr_write_address, +					&vendor_lttpr_write_data_pe[0], +					sizeof(vendor_lttpr_write_data_pe)); + +			/* 2. update DPCD*/ +			if (!retries_ch_eq) +				/* EPR #361076 - write as a 5-byte burst, +				 * but only for the 1-st iteration +				 */ + +				dpcd_set_lt_pattern_and_lane_settings( +					link, +					lt_settings, +					tr_pattern, 0); +			else +				dpcd_set_lane_settings(link, lt_settings, 0); + +			/* 3. wait for receiver to lock-on*/ +			wait_time_microsec = lt_settings->eq_pattern_time; + +			dp_wait_for_training_aux_rd_interval( +					link, +					wait_time_microsec); + +			/* 4. Read lane status and requested +			 * drive settings as set by the sink +			 */ +			dp_get_lane_status_and_lane_adjust( +				link, +				lt_settings, +				dpcd_lane_status, +				&dpcd_lane_status_updated, +				dpcd_lane_adjust, +				0); + +			/* 5. check CR done*/ +			if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { +				status = LINK_TRAINING_EQ_FAIL_CR; +				break; +			} + +			/* 6. check CHEQ done*/ +			if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && +					dp_is_symbol_locked(lane_count, dpcd_lane_status) && +					dp_is_interlane_aligned(dpcd_lane_status_updated)) { +				status = LINK_TRAINING_SUCCESS; +				break; +			} + +			/* 7. 
update VS/PE/PC2 in lt_settings*/ +			dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, +					lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +		} +	} + +	return status; +} + +  enum link_training_result dc_link_dp_perform_link_training(  	struct dc_link *link, +	const struct link_resource *link_res,  	const struct dc_link_settings *link_settings,  	bool skip_video_pattern)  { @@ -2203,30 +2795,51 @@ enum link_training_result dc_link_dp_perform_link_training(  			&lt_settings);  	/* reset previous training states */ -	dpcd_exit_training_mode(link); +	if (link->dc->debug.apply_vendor_specific_lttpr_wa && +			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +			link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { +		link->apply_vendor_specific_lttpr_link_rate_wa = true; +		vendor_specific_lttpr_wa_four(link, true); +	} else { +		dpcd_exit_training_mode(link); +	}  	/* configure link prior to entering training mode */  	dpcd_configure_lttpr_mode(link, &lt_settings); -	dp_set_fec_ready(link, lt_settings.should_set_fec_ready); +	dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);  	dpcd_configure_channel_coding(link, &lt_settings);  	/* enter training mode:  	 * Per DP specs starting from here, DPTX device shall not issue  	 * Non-LT AUX transactions inside training mode.  	 */ -	if (encoding == DP_8b_10b_ENCODING) -		status = dp_perform_8b_10b_link_training(link, &lt_settings); +	if (!link->dc->debug.apply_vendor_specific_lttpr_wa && +			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +			link->lttpr_mode == LTTPR_MODE_TRANSPARENT) +		status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings); +	else if (encoding == DP_8b_10b_ENCODING) +		status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);  #if defined(CONFIG_DRM_AMD_DC_DCN)  	else if (encoding == DP_128b_132b_ENCODING) -		status = dp_perform_128b_132b_link_training(link, &lt_settings); +		status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);  #endif  	else  		ASSERT(0); -	/* exit training mode and switch to video idle */ -	dpcd_exit_training_mode(link); +	/* exit training mode */ +	if (link->dc->debug.apply_vendor_specific_lttpr_wa && +			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +			link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { +		link->apply_vendor_specific_lttpr_link_rate_wa = false; +		vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS)); +	} else { +		dpcd_exit_training_mode(link); +	} + +	/* switch to video idle */  	if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)  		status = dp_transition_to_video_idle(link, +				link_res,  				&lt_settings,  				status); @@ -2278,6 +2891,7 @@ bool perform_link_training_with_retries(  		dp_enable_link_phy(  			link, +			&pipe_ctx->link_res,  			signal,  			pipe_ctx->clock_source->id,  			&current_setting); @@ -2305,23 +2919,24 @@ bool perform_link_training_with_retries(  		dp_set_panel_mode(link, panel_mode);  		if (link->aux_access_disabled) { -			dc_link_dp_perform_link_training_skip_aux(link, &current_setting); +			dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &current_setting);  			return true;  		} else {  			/** @todo Consolidate USB4 DP and DPx.x training. */
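 			/* USB4 tunneled (DPIA) endpoints train through the
 			 * DPIA-specific sequence below; all other DP endpoints
 			 * use the regular AUX-based sequence. Both paths now
 			 * take the pipe's link_res so the encoder assigned to
 			 * this stream follows the training request.
 			 */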
 			if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {  				status = dc_link_dpia_perform_link_training(link, -									    &current_setting, -									    skip_video_pattern); +						&pipe_ctx->link_res, +						&current_setting, +						skip_video_pattern);  				/* Transmit idle pattern once training successful. */  				if (status == LINK_TRAINING_SUCCESS) -					dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, -							       NULL, 0); +					dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);  			} else {  				status = dc_link_dp_perform_link_training(link, -									  &current_setting, -									  skip_video_pattern); +						&pipe_ctx->link_res, +						&current_setting, +						skip_video_pattern);  			}  			if (status == LINK_TRAINING_SUCCESS) @@ -2336,7 +2951,7 @@ bool perform_link_training_with_retries(  		DC_LOG_WARNING("%s: Link training attempt %u of %d failed\n",  			__func__, (unsigned int)j + 1, attempts); -		dp_disable_link_phy(link, signal); +		dp_disable_link_phy(link, &pipe_ctx->link_res, signal);  		/* Abort link training if failure due to sink being unplugged. */  		if (status == LINK_TRAINING_ABORT) { @@ -2349,7 +2964,7 @@ bool perform_link_training_with_retries(  			uint32_t req_bw;  			uint32_t link_bw; -			decide_fallback_link_setting(*link_setting, &current_setting, status); +			decide_fallback_link_setting(link, *link_setting, &current_setting, status);  			/* Fail link training if reduced link bandwidth no longer meets  			 * stream requirements.  			 */ @@ -2385,12 +3000,13 @@ static enum clock_source_id get_clock_source_id(struct dc_link *link)  	return dp_cs_id;  } -static void set_dp_mst_mode(struct dc_link *link, bool mst_enable) +static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res, +		bool mst_enable)  {  	if (mst_enable == false &&  		link->type == dc_connection_mst_branch) {  		/* Disable MST on link. Use only local sink. */ -		dp_disable_link_phy_mst(link, link->connector_signal); +		dp_disable_link_phy_mst(link, link_res, link->connector_signal);  		link->type = dc_connection_single;  		link->local_sink = link->remote_sinks[0]; @@ -2401,7 +3017,7 @@ static void set_dp_mst_mode(struct dc_link *link, bool mst_enable)  			link->type == dc_connection_single &&  			link->remote_sinks[0] != NULL) {  		/* Re-enable MST on link. */
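 		/* The PHY is torn down first so the branch device can
 		 * re-enumerate cleanly when MST is re-enabled on the sink and
 		 * the link type returns to dc_connection_mst_branch.
 		 */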
 -		dp_disable_link_phy(link, link->connector_signal); +		dp_disable_link_phy(link, link_res, link->connector_signal);  		dp_enable_mst_on_sink(link, true);  		link->type = dc_connection_mst_branch; @@ -2427,6 +3043,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link)  enum link_training_result dc_link_dp_sync_lt_attempt(      struct dc_link *link, +    const struct link_resource *link_res,      struct dc_link_settings *link_settings,      struct dc_link_training_overrides *lt_overrides)  { @@ -2446,14 +3063,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  			&lt_settings);  	/* Setup MST Mode */  	if (lt_overrides->mst_enable) -		set_dp_mst_mode(link, *lt_overrides->mst_enable); +		set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable);  	/* Disable link */ -	dp_disable_link_phy(link, link->connector_signal); +	dp_disable_link_phy(link, link_res, link->connector_signal);  	/* Enable link */  	dp_cs_id = get_clock_source_id(link); -	dp_enable_link_phy(link, link->connector_signal, +	dp_enable_link_phy(link, link_res, link->connector_signal,  		dp_cs_id, link_settings);  	/* Set FEC enable */ @@ -2461,7 +3078,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  	if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {  #endif  		fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; -		dp_set_fec_ready(link, fec_enable); +		dp_set_fec_ready(link, NULL, fec_enable);  #if defined(CONFIG_DRM_AMD_DC_DCN)  	}  #endif @@ -2478,7 +3095,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  	/* Attempt to train with given link training settings */  	if (link->ctx->dc->work_arounds.lt_early_cr_pattern) -		start_clock_recovery_pattern_early(link, &lt_settings, DPRX); +		start_clock_recovery_pattern_early(link, link_res, &lt_settings, DPRX);  	/* Set link rate, lane count and spread. */  	dpcd_set_link_settings(link, &lt_settings); @@ -2486,9 +3103,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt(  	/* 2. 
perform link training (set link training done  	 *  to false is done as well)  	 */ -	lt_status = perform_clock_recovery_sequence(link, &lt_settings, DPRX); +	lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX);  	if (lt_status == LINK_TRAINING_SUCCESS) {  		lt_status = perform_channel_equalization_sequence(link, +						link_res,  						&lt_settings,  						DPRX);  	} @@ -2509,11 +3127,11 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)  #if defined(CONFIG_DRM_AMD_DC_DCN)  		struct dc_link_settings link_settings = link->cur_link_settings;  #endif -		dp_disable_link_phy(link, link->connector_signal); +		dp_disable_link_phy(link, NULL, link->connector_signal);  #if defined(CONFIG_DRM_AMD_DC_DCN)  		if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)  #endif -			dp_set_fec_ready(link, NULL, false);  	}  	link->sync_lt_in_progress = false; @@ -2568,7 +3186,8 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_  	return false;  } -static struct dc_link_settings get_max_link_cap(struct dc_link *link) +static struct dc_link_settings get_max_link_cap(struct dc_link *link, +		const struct link_resource *link_res)  {  	struct dc_link_settings max_link_cap = {0};  #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -2592,9 +3211,11 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)  	if (link_enc)  		link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);  #if defined(CONFIG_DRM_AMD_DC_DCN) -	if (max_link_cap.link_rate >= LINK_RATE_UHBR10 && -			!link->hpo_dp_link_enc) -		max_link_cap.link_rate = LINK_RATE_HIGH3; +	if (max_link_cap.link_rate >= LINK_RATE_UHBR10) { +		if (!link_res->hpo_dp_link_enc || +				link->dc->debug.disable_uhbr) +			max_link_cap.link_rate = LINK_RATE_HIGH3; +	}  #endif  	/* Lower link settings based on sink's link cap */ @@ -2612,7 +3233,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)  	 * account for lttpr repeaters cap  	 * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).  	 */ -	if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { +	if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) {  		if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)  			max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; @@ -2751,6 +3372,7 @@ bool hpd_rx_irq_check_link_loss_status(  bool dp_verify_link_cap(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct dc_link_settings *known_limit_link_setting,  	int *fail_count)  { @@ -2768,7 +3390,7 @@ bool dp_verify_link_cap(  	/* link training starts with the maximum common settings  	 * supported by both sink and ASIC.  	 
*/ -	max_link_cap = get_max_link_cap(link); +	max_link_cap = get_max_link_cap(link, link_res);  	initial_link_settings = get_common_supported_link_settings(  			*known_limit_link_setting,  			max_link_cap); @@ -2808,7 +3430,7 @@ bool dp_verify_link_cap(  	 * find the physical link capability  	 */  	/* disable PHY done possible by BIOS, will be done by driver itself */ -	dp_disable_link_phy(link, link->connector_signal); +	dp_disable_link_phy(link, link_res, link->connector_signal);  	dp_cs_id = get_clock_source_id(link); @@ -2820,8 +3442,8 @@ bool dp_verify_link_cap(  	 */  	if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&  			link->dc->debug.usbc_combo_phy_reset_wa) { -		dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur); -		dp_disable_link_phy(link, link->connector_signal); +		dp_enable_link_phy(link, link_res, link->connector_signal, dp_cs_id, cur); +		dp_disable_link_phy(link, link_res, link->connector_signal);  	}  	do { @@ -2832,6 +3454,7 @@ bool dp_verify_link_cap(  		dp_enable_link_phy(  				link, +				link_res,  				link->connector_signal,  				dp_cs_id,  				cur); @@ -2842,6 +3465,7 @@ bool dp_verify_link_cap(  		else {  			status = dc_link_dp_perform_link_training(  							link, +							link_res,  							cur,  							skip_video_pattern);  			if (status == LINK_TRAINING_SUCCESS) @@ -2863,8 +3487,8 @@ bool dp_verify_link_cap(  		 * setting or before returning we'll enable it later  		 * based on the actual mode we're driving  		 */ -		dp_disable_link_phy(link, link->connector_signal); -	} while (!success && decide_fallback_link_setting( +		dp_disable_link_phy(link, link_res, link->connector_signal); +	} while (!success && decide_fallback_link_setting(link,  			initial_link_settings, cur, status));  	/* Link Training failed for all Link Settings @@ -2887,6 +3511,7 @@ bool dp_verify_link_cap(  bool dp_verify_link_cap_with_retries(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct dc_link_settings *known_limit_link_setting,  	int attempts)  { @@ -2904,7 +3529,7 @@ bool dp_verify_link_cap_with_retries(  			link->verified_link_cap.link_rate = LINK_RATE_LOW;  			link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;  			break; -		} else if (dp_verify_link_cap(link, +		} else if (dp_verify_link_cap(link, link_res,  				known_limit_link_setting,  				&fail_count) && fail_count == 0) {  			success = true; @@ -2916,13 +3541,13 @@ bool dp_verify_link_cap_with_retries(  }  bool dp_verify_mst_link_cap( -	struct dc_link *link) +	struct dc_link *link, const struct link_resource *link_res)  {  	struct dc_link_settings max_link_cap = {0};  	if (dp_get_link_encoding_format(&link->reported_link_cap) ==  			DP_8b_10b_ENCODING) { -		max_link_cap = get_max_link_cap(link); +		max_link_cap = get_max_link_cap(link, link_res);  		link->verified_link_cap = get_common_supported_link_settings(  				link->reported_link_cap,  				max_link_cap); @@ -2931,6 +3556,7 @@ bool dp_verify_mst_link_cap(  	else if (dp_get_link_encoding_format(&link->reported_link_cap) ==  			DP_128b_132b_ENCODING) {  		dp_verify_link_cap_with_retries(link, +				link_res,  				&link->reported_link_cap,  				LINK_TRAINING_MAX_VERIFY_RETRY);  	} @@ -3116,6 +3742,7 @@ static bool decide_fallback_link_setting_max_bw_policy(   *					and no further fallback could be done   */  static bool decide_fallback_link_setting( +		struct dc_link *link,  		struct dc_link_settings initial_link_settings,  		struct dc_link_settings *current_link_setting,  		enum link_training_result 
training_result) @@ -3123,7 +3750,8 @@ static bool decide_fallback_link_setting(  	if (!current_link_setting)  		return false;  #if defined(CONFIG_DRM_AMD_DC_DCN) -	if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING) +	if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || +			link->dc->debug.force_dp2_lt_fallback_method)  		return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,  				current_link_setting);  #endif @@ -3346,6 +3974,148 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin  	return false;  } +static bool decide_edp_link_settings_with_dsc(struct dc_link *link, +		struct dc_link_settings *link_setting, +		uint32_t req_bw, +		enum dc_link_rate max_link_rate) +{ +	struct dc_link_settings initial_link_setting; +	struct dc_link_settings current_link_setting; +	uint32_t link_bw; + +	unsigned int policy = 0; + +	policy = link->ctx->dc->debug.force_dsc_edp_policy; +	if (max_link_rate == LINK_RATE_UNKNOWN) +		max_link_rate = link->verified_link_cap.link_rate; +	/* +	 * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. +	 * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" +	 */ +	if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || +			link->dpcd_caps.edp_supported_link_rates_count == 0)) { +		/* for DSC enabled case, we search for minimum lane count */ +		memset(&initial_link_setting, 0, sizeof(initial_link_setting)); +		initial_link_setting.lane_count = LANE_COUNT_ONE; +		initial_link_setting.link_rate = LINK_RATE_LOW; +		initial_link_setting.link_spread = LINK_SPREAD_DISABLED; +		initial_link_setting.use_link_rate_set = false; +		initial_link_setting.link_rate_set = 0; +		current_link_setting = initial_link_setting; +		if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) +			return false; + +		/* search for the minimum link setting that: +		 * 1. is supported according to the link training result +		 * 2. 
could support the b/w requested by the timing +		 */ +		while (current_link_setting.link_rate <= +				max_link_rate) { +			link_bw = dc_link_bandwidth_kbps( +					link, +					&current_link_setting); +			if (req_bw <= link_bw) { +				*link_setting = current_link_setting; +				return true; +			} +			if (policy) { +				/* minimize lane */ +				if (current_link_setting.link_rate < max_link_rate) { +					current_link_setting.link_rate = +							increase_link_rate( +									current_link_setting.link_rate); +				} else { +					if (current_link_setting.lane_count < +									link->verified_link_cap.lane_count) { +						current_link_setting.lane_count = +								increase_lane_count( +										current_link_setting.lane_count); +						current_link_setting.link_rate = initial_link_setting.link_rate; +					} else +						break; +				} +			} else { +				/* minimize link rate */ +				if (current_link_setting.lane_count < +						link->verified_link_cap.lane_count) { +					current_link_setting.lane_count = +							increase_lane_count( +									current_link_setting.lane_count); +				} else { +					current_link_setting.link_rate = +							increase_link_rate( +									current_link_setting.link_rate); +					current_link_setting.lane_count = +							initial_link_setting.lane_count; +				} +			} +		} +		return false; +	} + +	/* if optimize edp link is supported */ +	memset(&initial_link_setting, 0, sizeof(initial_link_setting)); +	initial_link_setting.lane_count = LANE_COUNT_ONE; +	initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; +	initial_link_setting.link_spread = LINK_SPREAD_DISABLED; +	initial_link_setting.use_link_rate_set = true; +	initial_link_setting.link_rate_set = 0; +	current_link_setting = initial_link_setting; + +	/* search for the minimum link setting that: +	 * 1. is supported according to the link training result +	 * 2. could support the b/w requested by the timing +	 */
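+	/* Two walk orders are used below: with force_dsc_edp_policy set,
+	 * lane count is minimized by exhausting the supported link rates
+	 * first; otherwise link rate is minimized by exhausting lane count
+	 * first. Either way the first setting whose bandwidth covers req_bw
+	 * wins.
+	 */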
+	while (current_link_setting.link_rate <= +			max_link_rate) { +		link_bw = dc_link_bandwidth_kbps( +				link, +				&current_link_setting); +		if (req_bw <= link_bw) { +			*link_setting = current_link_setting; +			return true; +		} +		if (policy) { +			/* minimize lane */ +			if (current_link_setting.link_rate_set < +					link->dpcd_caps.edp_supported_link_rates_count +					&& current_link_setting.link_rate < max_link_rate) { +				current_link_setting.link_rate_set++; +				current_link_setting.link_rate = +					link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; +			} else { +				if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { +					current_link_setting.lane_count = +							increase_lane_count( +									current_link_setting.lane_count); +					current_link_setting.link_rate_set = initial_link_setting.link_rate_set; +					current_link_setting.link_rate = +						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; +				} else +					break; +			} +		} else { +			/* minimize link rate */ +			if (current_link_setting.lane_count < +					link->verified_link_cap.lane_count) { +				current_link_setting.lane_count = +						increase_lane_count( +								current_link_setting.lane_count); +			} else { +				if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { +					current_link_setting.link_rate_set++; +					current_link_setting.link_rate = +						link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; +					current_link_setting.lane_count = +						initial_link_setting.lane_count; +				} else +					break; +			} +		} +	} +	return false; +} +  static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)  {  	*link_setting = link->verified_link_cap; @@ -3380,7 +4150,25 @@ void decide_link_settings(struct dc_stream_state *stream,  		if (decide_mst_link_settings(link, link_setting))  			return;  	} else if (link->connector_signal == SIGNAL_TYPE_EDP) { -		if (decide_edp_link_settings(link, link_setting, req_bw)) +		/* enable edp link optimization for DSC eDP case */ +		if (stream->timing.flags.DSC) { +			enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; + +			if (link->ctx->dc->debug.force_dsc_edp_policy) { +				/* calculate link max link rate cap*/ +				struct dc_link_settings tmp_link_setting; +				struct dc_crtc_timing tmp_timing = stream->timing; +				uint32_t orig_req_bw; + +				tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; +				tmp_timing.flags.DSC = 0; +				orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); +				decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw); +				max_link_rate = tmp_link_setting.link_rate; +			} +			if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate)) +				return; +		} else if (decide_edp_link_settings(link, link_setting, req_bw))  			return;  	} else if (decide_dp_link_settings(link, link_setting, req_bw))  		return; @@ -3421,7 +4209,6 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)  		&psr_configuration.raw,  		sizeof(psr_configuration.raw)); -  	if (psr_configuration.bits.ENABLE) {  		unsigned char dpcdbuf[3] = {0};  		union psr_error_status psr_error_status; @@ -3453,10 +4240,12 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)  				sizeof(psr_error_status.raw));  			/* PSR error, disable and re-enable PSR */ -			allow_active = false; -			dc_link_set_psr_allow_active(link, 
&allow_active, true, false, NULL); -			allow_active = true; -			dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); +			if (link->psr_settings.psr_allow_active) { +				allow_active = false; +				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); +				allow_active = true; +				dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL); +			}  			return true;  		} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -3534,6 +4323,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)  			&dpcd_lane_adjustment[0].raw,  			sizeof(dpcd_lane_adjustment)); +	if (link->dc->debug.apply_vendor_specific_lttpr_wa && +			(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +			link->lttpr_mode == LTTPR_MODE_TRANSPARENT) +		vendor_specific_lttpr_wa_three_dpcd( +				link, +				link_training_settings.dpcd_lane_settings); +  	/*get post cursor 2 parameters  	 * For DP 1.1a or eariler, this DPCD register's value is 0  	 * For DP 1.2 or later: @@ -4153,6 +4949,56 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)  	return -1;  } +#if defined(CONFIG_DRM_AMD_DC_DCN) +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) +{ +	switch (bw) { +	case 0b001: +		return 9000000; +	case 0b010: +		return 18000000; +	case 0b011: +		return 24000000; +	case 0b100: +		return 32000000; +	case 0b101: +		return 40000000; +	case 0b110: +		return 48000000; +	} + +	return 0; +} + +/** + * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. + */ +static uint32_t intersect_frl_link_bw_support( +	const uint32_t max_supported_frl_bw_in_kbps, +	const union hdmi_encoded_link_bw hdmi_encoded_link_bw) +{ +	uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; + +	// HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) +	if (hdmi_encoded_link_bw.bits.FRL_MODE) { +		if (hdmi_encoded_link_bw.bits.BW_48Gbps) +			supported_bw_in_kbps = 48000000; +		else if (hdmi_encoded_link_bw.bits.BW_40Gbps) +			supported_bw_in_kbps = 40000000; +		else if (hdmi_encoded_link_bw.bits.BW_32Gbps) +			supported_bw_in_kbps = 32000000; +		else if (hdmi_encoded_link_bw.bits.BW_24Gbps) +			supported_bw_in_kbps = 24000000; +		else if (hdmi_encoded_link_bw.bits.BW_18Gbps) +			supported_bw_in_kbps = 18000000; +		else if (hdmi_encoded_link_bw.bits.BW_9Gbps) +			supported_bw_in_kbps = 9000000; +	} + +	return supported_bw_in_kbps; +} +#endif +  static void read_dp_device_vendor_id(struct dc_link *link)  {  	struct dp_device_vendor_id dp_id; @@ -4264,6 +5110,27 @@ static void get_active_converter_info(  						translate_dpcd_max_bpc(  							hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); +#if defined(CONFIG_DRM_AMD_DC_DCN) +					if (link->dc->caps.hdmi_frl_pcon_support) { +						union hdmi_encoded_link_bw hdmi_encoded_link_bw; + +						link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = +								dc_link_bw_kbps_from_raw_frl_link_rate_data( +										hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); + +						// Intersect reported max link bw support with the supported link rate post FRL link training +						if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, +								&hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { +							link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( +									link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, +									hdmi_encoded_link_bw); +			
			} + +						if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) +							link->dpcd_caps.dongle_caps.extendedCapValid = true; +					} +#endif +  					if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)  						link->dpcd_caps.dongle_caps.extendedCapValid = true;  				} @@ -4454,7 +5321,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)  				lttpr_dpcd_data,  				sizeof(lttpr_dpcd_data));  		if (status != DC_OK) { -			dm_error("%s: Read LTTPR caps data failed.\n", __func__); +			DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__);  			return false;  		} @@ -5218,7 +6085,7 @@ bool dc_link_dp_set_test_pattern(  			DP_TEST_PATTERN_VIDEO_MODE) {  		/* Set CRTC Test Pattern */  		set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); -		dp_set_hw_test_pattern(link, test_pattern, +		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,  				(uint8_t *)p_custom_pattern,  				(uint32_t)cust_pattern_size); @@ -5240,8 +6107,18 @@ bool dc_link_dp_set_test_pattern(  	if (is_dp_phy_pattern(test_pattern)) {  		/* Set DPCD Lane Settings before running test pattern */  		if (p_link_settings != NULL) { -			dp_set_hw_lane_settings(link, p_link_settings, DPRX); -			dpcd_set_lane_settings(link, p_link_settings, DPRX); +			if (link->dc->debug.apply_vendor_specific_lttpr_wa && +					(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && +					link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { +				dpcd_set_lane_settings(link, p_link_settings, DPRX); +				vendor_specific_lttpr_wa_five( +						link, +						p_link_settings->dpcd_lane_settings, +						p_link_settings->link_settings.lane_count); +			} else { +				dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); +				dpcd_set_lane_settings(link, p_link_settings, DPRX); +			}  		}  		/* Blank stream if running test pattern */ @@ -5254,7 +6131,7 @@ bool dc_link_dp_set_test_pattern(  			pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);  		} -		dp_set_hw_test_pattern(link, test_pattern, +		dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,  				(uint8_t *)p_custom_pattern,  				(uint32_t)cust_pattern_size); @@ -5574,7 +6451,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)  	return DP_PANEL_MODE_DEFAULT;  } -enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready) +enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)  {  	/* FEC has to be "set ready" before the link training.  	 
* The policy is to always train with FEC @@ -5665,6 +6542,23 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)  	}  } +struct link_encoder *dp_get_link_enc(struct dc_link *link) +{ +	struct link_encoder *link_enc; + +	link_enc = link->link_enc; +	if (link->is_dig_mapping_flexible && +	    link->dc->res_pool->funcs->link_encs_assign) { +		link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, +								  link); +		if (!link->link_enc) +			link_enc = link_enc_cfg_get_next_avail_link_enc( +				link->ctx->dc); +	} + +	return link_enc; +} +  void dpcd_set_source_specific_data(struct dc_link *link)  {  	if (!link->dc->vendor_signature.is_valid) { @@ -5885,7 +6779,10 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin  	req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); -	decide_edp_link_settings(link, &link_setting, req_bw); +	if (!crtc_timing->flags.DSC) +		decide_edp_link_settings(link, &link_setting, req_bw); +	else +		decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);  	if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||  			lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { @@ -6121,8 +7018,21 @@ struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(  bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)  { +	/* If this assert is hit then we have a link encoder dynamic management issue */ +	ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);  	return (pipe_ctx->stream_res.hpo_dp_stream_enc && -			pipe_ctx->stream->link->hpo_dp_link_enc && +			pipe_ctx->link_res.hpo_dp_link_enc &&  			dc_is_dp_signal(pipe_ctx->stream->signal));  }  #endif + +void edp_panel_backlight_power_on(struct dc_link *link) +{ +	if (link->connector_signal != SIGNAL_TYPE_EDP) +		return; + +	link->dc->hwss.edp_power_control(link, true); +	link->dc->hwss.edp_wait_for_hpd_ready(link, true); +	if (link->dc->hwss.edp_backlight_control) +		link->dc->hwss.edp_backlight_control(link, true); +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c index b1c9f77d6bf4..0e95bc5df4e7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c @@ -77,7 +77,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)   * @param[in] link_setting Lane count, link rate and downspread control.   * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis).   */ -static enum link_training_result dpia_configure_link(struct dc_link *link, +static enum link_training_result dpia_configure_link( +		struct dc_link *link, +		const struct link_resource *link_res,  		const struct dc_link_settings *link_setting,  		struct link_training_settings *lt_settings)  { @@ -94,25 +96,25 @@ static enum link_training_result dpia_configure_link(struct dc_link *link,  		lt_settings);  	status = dpcd_configure_channel_coding(link, lt_settings); -	if (status != DC_OK && !link->hpd_status) +	if (status != DC_OK && link->is_hpd_pending)  		return LINK_TRAINING_ABORT;  	/* Configure lttpr mode */  	status = dpcd_configure_lttpr_mode(link, lt_settings); -	if (status != DC_OK && !link->hpd_status) +	if (status != DC_OK && link->is_hpd_pending)  		return LINK_TRAINING_ABORT;  	/* Set link rate, lane count and spread. 
*/  	status = dpcd_set_link_settings(link, lt_settings); -	if (status != DC_OK && !link->hpd_status) +	if (status != DC_OK && link->is_hpd_pending)  		return LINK_TRAINING_ABORT;  	if (link->preferred_training_settings.fec_enable)  		fec_enable = *link->preferred_training_settings.fec_enable;  	else  		fec_enable = true; -	status = dp_set_fec_ready(link, fec_enable); -	if (status != DC_OK && !link->hpd_status) +	status = dp_set_fec_ready(link, link_res, fec_enable); +	if (status != DC_OK && link->is_hpd_pending)  		return LINK_TRAINING_ABORT;  	return LINK_TRAINING_SUCCESS; @@ -252,7 +254,9 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).   * @param hop The Hop in display path. DPRX = 0.   */ -static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link, +static enum link_training_result dpia_training_cr_non_transparent( +		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings,  		uint32_t hop)  { @@ -388,7 +392,7 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link  	}  	/* Abort link training if clock recovery failed due to HPD unplug. */ -	if (!link->hpd_status) +	if (link->is_hpd_pending)  		result = LINK_TRAINING_ABORT;  	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" @@ -411,7 +415,9 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link   * @param link DPIA link being trained.   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).   */ -static enum link_training_result dpia_training_cr_transparent(struct dc_link *link, +static enum link_training_result dpia_training_cr_transparent( +		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings)  {  	enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; @@ -490,7 +496,7 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li  	}  	/* Abort link training if clock recovery failed due to HPD unplug. */ -	if (!link->hpd_status) +	if (link->is_hpd_pending)  		result = LINK_TRAINING_ABORT;  	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" @@ -511,16 +517,18 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).   * @param hop The Hop in display path. DPRX = 0.   */ -static enum link_training_result dpia_training_cr_phase(struct dc_link *link, +static enum link_training_result dpia_training_cr_phase( +		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings,  		uint32_t hop)  {  	enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;  	if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) -		result = dpia_training_cr_non_transparent(link, lt_settings, hop); +		result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop);  	else -		result = dpia_training_cr_transparent(link, lt_settings); +		result = dpia_training_cr_transparent(link, link_res, lt_settings);  	return result;  } @@ -561,7 +569,9 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).   * @param hop The Hop in display path. DPRX = 0.   
*/ -static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link, +static enum link_training_result dpia_training_eq_non_transparent( +		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings,  		uint32_t hop)  { @@ -675,7 +685,7 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link  	}  	/* Abort link training if equalization failed due to HPD unplug. */ -	if (!link->hpd_status) +	if (link->is_hpd_pending)  		result = LINK_TRAINING_ABORT;  	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" @@ -700,7 +710,9 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).   * @param hop The Hop in display path. DPRX = 0.   */ -static enum link_training_result dpia_training_eq_transparent(struct dc_link *link, +static enum link_training_result dpia_training_eq_transparent( +		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings)  {  	enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; @@ -758,7 +770,7 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li  	}  	/* Abort link training if equalization failed due to HPD unplug. */ -	if (!link->hpd_status) +	if (link->is_hpd_pending)  		result = LINK_TRAINING_ABORT;  	DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" @@ -779,16 +791,18 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li   * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).   * @param hop The Hop in display path. DPRX = 0.   */ -static enum link_training_result dpia_training_eq_phase(struct dc_link *link, +static enum link_training_result dpia_training_eq_phase( +		struct dc_link *link, +		const struct link_resource *link_res,  		struct link_training_settings *lt_settings,  		uint32_t hop)  {  	enum link_training_result result;  	if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) -		result = dpia_training_eq_non_transparent(link, lt_settings, hop); +		result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop);  	else -		result = dpia_training_eq_transparent(link, lt_settings); +		result = dpia_training_eq_transparent(link, link_res, lt_settings);  	return result;  } @@ -892,10 +906,10 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)  				__func__,  				link->link_id.enum_id - ENUM_ID_1,  				link->lttpr_mode, -				link->hpd_status); +				link->is_hpd_pending);  	/* Abandon clean-up if sink unplugged. */ -	if (!link->hpd_status) +	if (link->is_hpd_pending)  		return;  	if (hop != DPRX) @@ -908,7 +922,9 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop)  	core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);  } -enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link, +enum link_training_result dc_link_dpia_perform_link_training( +	struct dc_link *link, +	const struct link_resource *link_res,  	const struct dc_link_settings *link_setting,  	bool skip_video_pattern)  { @@ -918,7 +934,7 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin  	int8_t repeater_id; /* Current hop. */  	/* Configure link as prescribed in link_setting and set LTTPR mode. 
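
The cr/eq phase helpers above dispatch on the link's LTTPR mode: non-transparent mode trains each hop individually, transparent mode trains DPRX only. A compact sketch of that dispatch shape, using stand-in functions rather than the real DPIA helpers:

#include <stdio.h>

enum lttpr_mode { LTTPR_MODE_TRANSPARENT, LTTPR_MODE_NON_TRANSPARENT };

static int cr_non_transparent(int hop)
{
	printf("CR via DPCD relay, hop %d\n", hop);
	return 0;
}

static int cr_transparent(void)
{
	printf("CR directly with DPRX\n");
	return 0;
}

static int cr_phase(enum lttpr_mode mode, int hop)
{
	/* Only the non-transparent variant cares which hop is trained. */
	return mode == LTTPR_MODE_NON_TRANSPARENT ?
			cr_non_transparent(hop) : cr_transparent();
}

int main(void)
{
	return cr_phase(LTTPR_MODE_NON_TRANSPARENT, 1);
}
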
*/ -	result = dpia_configure_link(link, link_setting, &lt_settings); +	result = dpia_configure_link(link, link_res, link_setting, &lt_settings);  	if (result != LINK_TRAINING_SUCCESS)  		return result; @@ -930,12 +946,12 @@ enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *lin  	 */  	for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {  		/* Clock recovery. */ -		result = dpia_training_cr_phase(link, &lt_settings, repeater_id); +		result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id);  		if (result != LINK_TRAINING_SUCCESS)  			break;  		/* Equalization. */ -		result = dpia_training_eq_phase(link, &lt_settings, repeater_id); +		result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id);  		if (result != LINK_TRAINING_SUCCESS)  			break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 25e48a8cbb78..a55944da8d53 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -118,7 +118,10 @@ static void remove_link_enc_assignment(  				 */  				if (get_stream_using_link_enc(state, eng_id) == NULL)  					state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id; +  				stream->link_enc = NULL; +				state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; +				state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL;  				break;  			}  		} @@ -148,6 +151,7 @@ static void add_link_enc_assignment(  						.ep_type = stream->link->ep_type},  					.eng_id = eng_id,  					.stream = stream}; +				dc_stream_retain(stream);  				state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;  				stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];  				break; @@ -227,7 +231,7 @@ static struct link_encoder *get_link_enc_used_by_link(  		.link_id = link->link_id,  		.ep_type = link->ep_type}; -	for (i = 0; i < state->stream_count; i++) { +	for (i = 0; i < MAX_PIPES; i++) {  		struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];  		if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) @@ -237,28 +241,18 @@ static struct link_encoder *get_link_enc_used_by_link(  	return link_enc;  }  /* Clear all link encoder assignments. 
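
add_link_enc_assignment() above now takes a reference on the stream it records; the matching dc_stream_release() appears in clear_enc_assignments() below. A toy refcount model of that retain-on-assign / release-on-clear pairing (assert() stands in for the kernel's kref machinery):

#include <assert.h>
#include <stdio.h>

struct stream { int ref; };

static void retain(struct stream *s)  { s->ref++; }
static void release(struct stream *s) { assert(s->ref > 0); s->ref--; }

struct assignment { struct stream *stream; int valid; };

static void assign(struct assignment *a, struct stream *s)
{
	a->stream = s;
	a->valid = 1;
	retain(s); /* the table now owns a reference */
}

static void clear(struct assignment *a)
{
	if (a->stream) {
		release(a->stream); /* drop the table's reference */
		a->stream = NULL;
	}
	a->valid = 0;
}

int main(void)
{
	struct stream s = { .ref = 1 };
	struct assignment a = { 0 };

	assign(&a, &s);
	clear(&a);
	printf("ref=%d\n", s.ref); /* back to 1: no leak, no double free */
	return 0;
}
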
*/ -static void clear_enc_assignments(struct dc_state *state) +static void clear_enc_assignments(const struct dc *dc, struct dc_state *state)  {  	int i; -	enum engine_id eng_id; -	struct dc_stream_state *stream;  	for (i = 0; i < MAX_PIPES; i++) {  		state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; -		eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id; -		stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream; -		if (eng_id != ENGINE_ID_UNKNOWN) -			state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id; -		if (stream) -			stream->link_enc = NULL; +		state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; +		if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) { +			dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream); +			state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; +		}  	} -} - -void link_enc_cfg_init( -		struct dc *dc, -		struct dc_state *state) -{ -	int i;  	for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {  		if (dc->res_pool->link_encoders[i]) @@ -266,8 +260,13 @@ void link_enc_cfg_init(  		else  			state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;  	} +} -	clear_enc_assignments(state); +void link_enc_cfg_init( +		const struct dc *dc, +		struct dc_state *state) +{ +	clear_enc_assignments(dc, state);  	state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;  } @@ -284,12 +283,9 @@ void link_enc_cfg_link_encs_assign(  	ASSERT(state->stream_count == stream_count); -	if (stream_count == 0) -		clear_enc_assignments(state); -  	/* Release DIG link encoder resources before running assignment algorithm. */ -	for (i = 0; i < stream_count; i++) -		dc->res_pool->funcs->link_enc_unassign(state, streams[i]); +	for (i = 0; i < dc->current_state->stream_count; i++) +		dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]);  	for (i = 0; i < MAX_PIPES; i++)  		ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false); @@ -544,6 +540,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)  	uint8_t dig_stream_count = 0;  	int matching_stream_ptrs = 0;  	int eng_ids_per_ep_id[MAX_PIPES] = {0}; +	int valid_bitmap = 0;  	/* (1) No. valid entries same as stream count. 
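
The valid_bitmap declared above collects the five validation results into one logged value; the packing follows further below, with bit i set when check i passed. A hypothetical decoder for such a log value, assuming that bit order:

#include <stdio.h>

/* Assumed bit layout, mirroring the packing in link_enc_cfg_validate(). */
static const char *const checks[] = {
	"valid_entries", "valid_stream_ptrs", "valid_uniqueness",
	"valid_avail", "valid_streams",
};

static void decode(int bitmap)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("%s: %s\n", checks[i],
		       (bitmap >> i) & 1 ? "ok" : "FAILED");
}

int main(void)
{
	decode(0x1b); /* example: only the uniqueness check failed */
	return 0;
}
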
*/  	for (i = 0; i < MAX_PIPES; i++) { @@ -625,5 +622,15 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)  	is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams;  	ASSERT(is_valid); +	if (is_valid == false) { +		valid_bitmap = +			(valid_entries & 0x1) | +			((valid_stream_ptrs & 0x1) << 1) | +			((valid_uniqueness & 0x1) << 2) | +			((valid_avail & 0x1) << 3) | +			((valid_streams & 0x1) << 4); +		dm_error("Invalid link encoder assignments: 0x%x\n", valid_bitmap); +	} +  	return is_valid;  } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 368e834c6809..45d03d3a95c3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -71,6 +71,7 @@ void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)  void dp_enable_link_phy(  	struct dc_link *link, +	const struct link_resource *link_res,  	enum signal_type signal,  	enum clock_source_id clock_source,  	const struct dc_link_settings *link_settings) @@ -135,7 +136,7 @@ void dp_enable_link_phy(  #if defined(CONFIG_DRM_AMD_DC_DCN)  	if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { -		enable_dp_hpo_output(link, link_settings); +		enable_dp_hpo_output(link, link_res, link_settings);  	} else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {  		if (dc_is_dp_sst_signal(signal)) {  			link_enc->funcs->enable_dp_output( @@ -236,12 +237,13 @@ bool edp_receiver_ready_T7(struct dc_link *link)  	return result;  } -void dp_disable_link_phy(struct dc_link *link, enum signal_type signal) +void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, +		enum signal_type signal)  {  	struct dc  *dc = link->ctx->dc;  	struct dmcu *dmcu = dc->res_pool->dmcu;  #if defined(CONFIG_DRM_AMD_DC_DCN) -	struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc; +	struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc;  #endif  	struct link_encoder *link_enc; @@ -260,7 +262,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)  			link->dc->hwss.edp_backlight_control(link, false);  #if defined(CONFIG_DRM_AMD_DC_DCN)  		if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) -			disable_dp_hpo_output(link, signal); +			disable_dp_hpo_output(link, link_res, signal);  		else  			link_enc->funcs->disable_output(link_enc, signal);  #else @@ -274,7 +276,7 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)  #if defined(CONFIG_DRM_AMD_DC_DCN)  		if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&  				hpo_link_enc) -			disable_dp_hpo_output(link, signal); +			disable_dp_hpo_output(link, link_res, signal);  		else  			link_enc->funcs->disable_output(link_enc, signal);  #else @@ -294,13 +296,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)  		dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);  } -void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal) +void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, +		enum signal_type signal)  {  	/* MST disable link only when no stream use the link */  	if (link->mst_stream_alloc_table.stream_count > 0)  		return; -	dp_disable_link_phy(link, signal); +	dp_disable_link_phy(link, link_res, signal);  	/* set the sink to SST mode after 
disabling the link */  	dp_enable_mst_on_sink(link, false); @@ -308,6 +311,7 @@ void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal)  bool dp_set_hw_training_pattern(  	struct dc_link *link, +	const struct link_resource *link_res,  	enum dc_dp_training_pattern pattern,  	uint32_t offset)  { @@ -338,7 +342,7 @@ bool dp_set_hw_training_pattern(  		break;  	} -	dp_set_hw_test_pattern(link, test_pattern, NULL, 0); +	dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);  	return true;  } @@ -349,6 +353,7 @@ bool dp_set_hw_training_pattern(  #endif  void dp_set_hw_lane_settings(  	struct dc_link *link, +	const struct link_resource *link_res,  	const struct link_training_settings *link_settings,  	uint32_t offset)  { @@ -361,8 +366,8 @@ void dp_set_hw_lane_settings(  #if defined(CONFIG_DRM_AMD_DC_DCN)  	if (dp_get_link_encoding_format(&link_settings->link_settings) ==  			DP_128b_132b_ENCODING) { -		link->hpo_dp_link_enc->funcs->set_ffe( -				link->hpo_dp_link_enc, +		link_res->hpo_dp_link_enc->funcs->set_ffe( +				link_res->hpo_dp_link_enc,  				&link_settings->link_settings,  				link_settings->lane_settings[0].FFE_PRESET.raw);  	} else if (dp_get_link_encoding_format(&link_settings->link_settings) @@ -379,6 +384,7 @@ void dp_set_hw_lane_settings(  void dp_set_hw_test_pattern(  	struct dc_link *link, +	const struct link_resource *link_res,  	enum dp_test_pattern test_pattern,  	uint8_t *custom_pattern,  	uint32_t custom_pattern_size) @@ -406,8 +412,8 @@ void dp_set_hw_test_pattern(  #if defined(CONFIG_DRM_AMD_DC_DCN)  	switch (link_encoding_format) {  	case DP_128b_132b_ENCODING: -		link->hpo_dp_link_enc->funcs->set_link_test_pattern( -				link->hpo_dp_link_enc, &pattern_param); +		link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( +				link_res->hpo_dp_link_enc, &pattern_param);  		break;  	case DP_8b_10b_ENCODING:  		ASSERT(encoder); @@ -446,7 +452,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,  					pipes[i].stream_res.stream_enc);  			/* disable any test pattern that might be active */ -			dp_set_hw_test_pattern(link, +			dp_set_hw_test_pattern(link, &pipes[i].link_res,  					DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);  			dp_receiver_power_ctrl(link, false); @@ -763,7 +769,9 @@ static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)  	}  } -void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings) +void enable_dp_hpo_output(struct dc_link *link, +		const struct link_resource *link_res, +		const struct dc_link_settings *link_settings)  {  	const struct dc *dc = link->dc;  	enum phyd32clk_clock_source phyd32clk; @@ -789,10 +797,11 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l  		}  	} else {  		/* DP2.0 HW: call transmitter control to enable PHY */ -		link->hpo_dp_link_enc->funcs->enable_link_phy( -				link->hpo_dp_link_enc, +		link_res->hpo_dp_link_enc->funcs->enable_link_phy( +				link_res->hpo_dp_link_enc,  				link_settings, -				link->link_enc->transmitter); +				link->link_enc->transmitter, +				link->link_enc->hpd_source);  	}  	/* DCCG muxing and DTBCLK DTO */ @@ -806,24 +815,26 @@ void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *l  		phyd32clk = get_phyd32clk_src(link);  		dc->res_pool->dccg->funcs->enable_symclk32_le(  				dc->res_pool->dccg, -				link->hpo_dp_link_enc->inst, +				link_res->hpo_dp_link_enc->inst,  				phyd32clk); -		link->hpo_dp_link_enc->funcs->link_enable( -					link->hpo_dp_link_enc, -					
link_settings->lane_count); +		link_res->hpo_dp_link_enc->funcs->link_enable( +				link_res->hpo_dp_link_enc, +				link_settings->lane_count);  	}  } -void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal) +void disable_dp_hpo_output(struct dc_link *link, +		const struct link_resource *link_res, +		enum signal_type signal)  {  	const struct dc *dc = link->dc; -	link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc); +	link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc);  	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {  		dc->res_pool->dccg->funcs->disable_symclk32_le(  					dc->res_pool->dccg, -					link->hpo_dp_link_enc->inst); +					link_res->hpo_dp_link_enc->inst);  		dc->res_pool->dccg->funcs->set_physymclk(  					dc->res_pool->dccg, @@ -834,8 +845,8 @@ void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)  		dm_set_phyd32clk(dc->ctx, 0);  	} else {  		/* DP2.0 HW: call transmitter control to disable PHY */ -		link->hpo_dp_link_enc->funcs->disable_link_phy( -				link->hpo_dp_link_enc, +		link_res->hpo_dp_link_enc->funcs->disable_link_phy( +				link_res->hpo_dp_link_enc,  				signal);  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c32fdccd4d92..d4ff6cc6b8d9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(  	if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)  		return false; +	// Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks +	if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) +		return false; +  	return true;  } @@ -1720,6 +1724,94 @@ static void update_hpo_dp_stream_engine_usage(  			res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;  	}  } + +static inline int find_acquired_hpo_dp_link_enc_for_link( +		const struct resource_context *res_ctx, +		const struct dc_link *link) +{ +	int i; + +	for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_to_link_idx); i++) +		if (res_ctx->hpo_dp_link_enc_ref_cnts[i] > 0 && +				res_ctx->hpo_dp_link_enc_to_link_idx[i] == link->link_index) +			return i; + +	return -1; +} + +static inline int find_free_hpo_dp_link_enc(const struct resource_context *res_ctx, +		const struct resource_pool *pool) +{ +	int i; + +	for (i = 0; i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts); i++) +		if (res_ctx->hpo_dp_link_enc_ref_cnts[i] == 0) +			break; + +	return (i < ARRAY_SIZE(res_ctx->hpo_dp_link_enc_ref_cnts) && +			i < pool->hpo_dp_link_enc_count) ? 
i : -1; +} + +static inline void acquire_hpo_dp_link_enc( +		struct resource_context *res_ctx, +		unsigned int link_index, +		int enc_index) +{ +	res_ctx->hpo_dp_link_enc_to_link_idx[enc_index] = link_index; +	res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] = 1; +} + +static inline void retain_hpo_dp_link_enc( +		struct resource_context *res_ctx, +		int enc_index) +{ +	res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]++; +} + +static inline void release_hpo_dp_link_enc( +		struct resource_context *res_ctx, +		int enc_index) +{ +	ASSERT(res_ctx->hpo_dp_link_enc_ref_cnts[enc_index] > 0); +	res_ctx->hpo_dp_link_enc_ref_cnts[enc_index]--; +} + +static bool add_hpo_dp_link_enc_to_ctx(struct resource_context *res_ctx, +		const struct resource_pool *pool, +		struct pipe_ctx *pipe_ctx, +		struct dc_stream_state *stream) +{ +	int enc_index; + +	enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link); + +	if (enc_index >= 0) { +		retain_hpo_dp_link_enc(res_ctx, enc_index); +	} else { +		enc_index = find_free_hpo_dp_link_enc(res_ctx, pool); +		if (enc_index >= 0) +			acquire_hpo_dp_link_enc(res_ctx, stream->link->link_index, enc_index); +	} + +	if (enc_index >= 0) +		pipe_ctx->link_res.hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index]; + +	return pipe_ctx->link_res.hpo_dp_link_enc != NULL; +} + +static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx, +		struct pipe_ctx *pipe_ctx, +		struct dc_stream_state *stream) +{ +	int enc_index; + +	enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, stream->link); + +	if (enc_index >= 0) { +		release_hpo_dp_link_enc(res_ctx, enc_index); +		pipe_ctx->link_res.hpo_dp_link_enc = NULL; +	} +}  #endif  /* TODO: release audio object */ @@ -1882,6 +1974,7 @@ enum dc_status dc_remove_stream_from_ctx(  			&new_ctx->res_ctx, dc->res_pool,  			del_pipe->stream_res.hpo_dp_stream_enc,  			false); +		remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream);  	}  #endif @@ -2078,7 +2171,6 @@ static void mark_seamless_boot_stream(  {  	struct dc_bios *dcb = dc->ctx->dc_bios; -	/* TODO: Check Linux */  	if (dc->config.allow_seamless_boot_optimization &&  			!dcb->funcs->is_accelerated_mode(dcb)) {  		if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing)) @@ -2158,6 +2250,8 @@ enum dc_status resource_map_pool_resources(  					&context->res_ctx, pool,  					pipe_ctx->stream_res.hpo_dp_stream_enc,  					true); +			if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, pool, pipe_ctx, stream)) +				return DC_NO_LINK_ENC_RESOURCE;  		}  	}  #endif @@ -2224,6 +2318,9 @@ void dc_resource_state_construct(  		struct dc_state *dst_ctx)  {  	dst_ctx->clk_mgr = dc->clk_mgr; + +	/* Initialise DIG link encoder resource tracking variables. */ +	link_enc_cfg_init(dc, dst_ctx);  } @@ -2252,16 +2349,6 @@ enum dc_status dc_validate_global_state(  	if (!new_ctx)  		return DC_ERROR_UNEXPECTED; -#if defined(CONFIG_DRM_AMD_DC_DCN) - -	/* -	 * Update link encoder to stream assignment. -	 * TODO: Split out reason allocation from validation. 
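
The acquire/retain/release helpers above implement a small ref-counted allocator: a link reuses the HPO DP link encoder it already holds, otherwise it takes a free one, and exhaustion surfaces as DC_NO_LINK_ENC_RESOURCE. A self-contained sketch of that policy:

#include <stdio.h>

#define NUM_HPO_ENC 2

static int ref_cnt[NUM_HPO_ENC];
static unsigned int enc_to_link[NUM_HPO_ENC];

static int acquire_for_link(unsigned int link_index)
{
	int i;

	/* Reuse an encoder already serving this link (refcount bump)... */
	for (i = 0; i < NUM_HPO_ENC; i++)
		if (ref_cnt[i] > 0 && enc_to_link[i] == link_index) {
			ref_cnt[i]++;
			return i;
		}
	/* ...otherwise grab a free one. */
	for (i = 0; i < NUM_HPO_ENC; i++)
		if (ref_cnt[i] == 0) {
			enc_to_link[i] = link_index;
			ref_cnt[i] = 1;
			return i;
		}
	return -1; /* maps to DC_NO_LINK_ENC_RESOURCE in the driver */
}

int main(void)
{
	int a = acquire_for_link(0);
	int b = acquire_for_link(0); /* same link: same encoder, retained */
	int c = acquire_for_link(1); /* different link: next free encoder */

	printf("%d %d %d\n", a, b, c); /* 0 0 1 */
	return 0;
}
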
-	 */ -	if (dc->res_pool->funcs->link_encs_assign && fast_validate == false) -		dc->res_pool->funcs->link_encs_assign( -			dc, new_ctx, new_ctx->streams, new_ctx->stream_count); -#endif  	if (dc->res_pool->funcs->validate_global) {  		result = dc->res_pool->funcs->validate_global(dc, new_ctx); @@ -2313,6 +2400,16 @@ enum dc_status dc_validate_global_state(  		if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))  			result = DC_FAIL_BANDWIDTH_VALIDATE; +#if defined(CONFIG_DRM_AMD_DC_DCN) +	/* +	 * Only update link encoder to stream assignment after bandwidth validation passed. +	 * TODO: Split out assignment and validation. +	 */ +	if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) +		dc->res_pool->funcs->link_encs_assign( +			dc, new_ctx, new_ctx->streams, new_ctx->stream_count); +#endif +  	return result;  } @@ -2506,17 +2603,7 @@ static void set_avi_info_frame(  	/* TODO : We should handle YCC quantization */  	/* but we do not have matrix calculation */ -	if (stream->qy_bit == 1) { -		if (color_space == COLOR_SPACE_SRGB || -			color_space == COLOR_SPACE_2020_RGB_FULLRANGE) -			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; -		else if (color_space == COLOR_SPACE_SRGB_LIMITED || -					color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) -			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; -		else -			hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; -	} else -		hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; +	hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE;  	///VIC  	format = stream->timing.timing_3d_format; @@ -2840,6 +2927,8 @@ bool pipe_need_reprogram(  #if defined(CONFIG_DRM_AMD_DC_DCN)  	if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)  		return true; +	if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc) +		return true;  #endif  	/* DIG link encoder resource assignment for stream changed. 
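
pipe_need_reprogram() above now also treats a change of the pipe's HPO DP link encoder as requiring a full reprogram. A minimal stand-in showing the comparison; only the compared fields are modeled here:

#include <stdbool.h>
#include <stdio.h>

struct link_res { const void *hpo_dp_link_enc; };
struct pipe { const void *stream_enc; struct link_res link_res; };

static bool pipe_need_reprogram(const struct pipe *old, const struct pipe *cur)
{
	/* Any backing encoder swap forces a full pipe reprogram. */
	if (old->stream_enc != cur->stream_enc)
		return true;
	if (old->link_res.hpo_dp_link_enc != cur->link_res.hpo_dp_link_enc)
		return true;
	return false;
}

int main(void)
{
	int enc_a, enc_b;
	struct pipe old = { &enc_a, { &enc_a } };
	struct pipe cur = { &enc_a, { &enc_b } };

	printf("%d\n", pipe_need_reprogram(&old, &cur)); /* 1 */
	return 0;
}
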
*/ @@ -3108,21 +3197,55 @@ void get_audio_check(struct audio_info *aud_modes,  }  #if defined(CONFIG_DRM_AMD_DC_DCN) -struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( -		const struct resource_pool *pool) +struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( +		const struct resource_context *res_ctx, +		const struct resource_pool *pool, +		const struct dc_link *link)  { -	uint8_t i; -	struct hpo_dp_link_encoder *enc = NULL; +	struct hpo_dp_link_encoder *hpo_dp_link_enc = NULL; +	int enc_index; + +	enc_index = find_acquired_hpo_dp_link_enc_for_link(res_ctx, link); + +	if (enc_index < 0) +		enc_index = find_free_hpo_dp_link_enc(res_ctx, pool); + +	if (enc_index >= 0) +		hpo_dp_link_enc = pool->hpo_dp_link_enc[enc_index]; + +	return hpo_dp_link_enc; +} +#endif -	ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS); +uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter) +{ +	/* TODO - get transmitter to phy idx mapping from DMUB */ +	uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A; -	for (i = 0; i < pool->hpo_dp_link_enc_count; i++) { -		if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) { -			enc = pool->hpo_dp_link_enc[i]; +#if defined(CONFIG_DRM_AMD_DC_DCN) +	if (dc->ctx->dce_version == DCN_VERSION_3_1 && +			dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { +		switch (transmitter) { +		case TRANSMITTER_UNIPHY_A: +			phy_idx = 0; +			break; +		case TRANSMITTER_UNIPHY_B: +			phy_idx = 1; +			break; +		case TRANSMITTER_UNIPHY_C: +			phy_idx = 5; +			break; +		case TRANSMITTER_UNIPHY_D: +			phy_idx = 6; +			break; +		case TRANSMITTER_UNIPHY_E: +			phy_idx = 4; +			break; +		default: +			phy_idx = 0;  			break;  		}  	} - -	return enc; -}  #endif +	return phy_idx; +} diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index a249a0e5edd0..4b5e4d8e7735 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,14 +33,6 @@   * Private functions   ******************************************************************************/ -static void dc_sink_destruct(struct dc_sink *sink) -{ -	if (sink->dc_container_id) { -		kfree(sink->dc_container_id); -		sink->dc_container_id = NULL; -	} -} -  static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params)  { @@ -75,7 +67,7 @@ void dc_sink_retain(struct dc_sink *sink)  static void dc_sink_free(struct kref *kref)  {  	struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); -	dc_sink_destruct(sink); +	kfree(sink->dc_container_id);  	kfree(sink);  } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3aac3f4a2852..da2c78ce14d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload;  struct set_config_cmd_payload;  struct dmub_notification; -#define DC_VER "3.2.160" +#define DC_VER "3.2.167"  #define MAX_SURFACES 3  #define MAX_PLANES 6 @@ -75,6 +75,16 @@ enum dc_plane_type {  	DC_PLANE_TYPE_DCN_UNIVERSAL,  }; +// Sizes defined as multiples of 64KB +enum det_size { +	DET_SIZE_DEFAULT = 0, +	DET_SIZE_192KB = 3, +	DET_SIZE_256KB = 4, +	DET_SIZE_320KB = 5, +	DET_SIZE_384KB = 6 +}; + +  struct dc_plane_cap {  	enum dc_plane_type type;  	uint32_t blends_with_above : 1; @@ -187,7 +197,9 @@ struct dc_caps {  	struct dc_color_caps color;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	bool dp_hpo; +	bool 
hdmi_frl_pcon_support;  #endif +	bool edp_dsc_support;  	bool vbios_lttpr_aware;  	bool vbios_lttpr_enable;  }; @@ -508,7 +520,9 @@ union dpia_debug_options {  		uint32_t disable_dpia:1;  		uint32_t force_non_lttpr:1;  		uint32_t extend_aux_rd_interval:1; -		uint32_t reserved:29; +		uint32_t disable_mst_dsc_work_around:1; +		uint32_t hpd_delay_in_ms:12; +		uint32_t reserved:16;  	} bits;  	uint32_t raw;  }; @@ -573,6 +587,8 @@ struct dc_debug_options {  	bool native422_support;  	bool disable_dsc;  	enum visual_confirm visual_confirm; +	int visual_confirm_rect_height; +  	bool sanity_checks;  	bool max_disp_clk;  	bool surface_trace; @@ -667,11 +683,15 @@ struct dc_debug_options {  	bool validate_dml_output;  	bool enable_dmcub_surface_flip;  	bool usbc_combo_phy_reset_wa; +	bool disable_dsc_edp; +	unsigned int  force_dsc_edp_policy;  	bool enable_dram_clock_change_one_display_vactive;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	/* TODO - remove once tested */  	bool legacy_dp2_lt;  	bool set_mst_en_for_sst; +	bool disable_uhbr; +	bool force_dp2_lt_fallback_method;  #endif  	union mem_low_power_enable_options enable_mem_low_power;  	union root_clock_optimization_options root_clock_optimization; @@ -684,11 +704,14 @@ struct dc_debug_options {  	/* FEC/PSR1 sequence enable delay in 100us */  	uint8_t fec_enable_delay_in100us;  	bool enable_driver_sequence_debug; +	enum det_size crb_alloc_policy; +	int crb_alloc_policy_min_disp_count;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	bool disable_z10;  	bool enable_sw_cntl_psr;  	union dpia_debug_options dpia_debug;  #endif +	bool apply_vendor_specific_lttpr_wa;  };  struct gpu_info_soc_bounding_box_v1_0; @@ -1289,6 +1312,11 @@ struct dc_sink_dsc_caps {  	// 'true' if these are virtual DPCD's DSC caps (immediately upstream of sink in MST topology),  	// 'false' if they are sink's DSC caps  	bool is_virtual_dpcd_dsc; +#if defined(CONFIG_DRM_AMD_DC_DCN) +	// 'true' if MST topology supports DSC passthrough for sink +	// 'false' if MST topology does not support DSC passthrough +	bool is_dsc_passthrough_supported; +#endif  	struct dsc_dec_dpcd_caps dsc_dec_caps;  }; @@ -1404,6 +1432,9 @@ void dc_unlock_memory_clock_frequency(struct dc *dc);   */  void dc_lock_memory_clock_frequency(struct dc *dc); +/* set soft max for memclk, to be used for AC/DC switching clock limitations */ +void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable); +  /* cleanup on driver unload */  void dc_hardware_release(struct dc *dc); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 360f3199ea6f..541376fabbef 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -115,13 +115,44 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)  	}  } +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ +	struct dmub_srv *dmub = dmub_srv->dmub; +	struct dc_context *dc_ctx = dmub_srv->ctx; +	enum dmub_status status = DMUB_STATUS_OK; + +	status = dmub_srv_clear_inbox0_ack(dmub); +	if (status != DMUB_STATUS_OK) { +		DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status); +		dc_dmub_srv_log_diagnostic_data(dmub_srv); +	} +} + +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ +	struct dmub_srv *dmub = dmub_srv->dmub; +	struct dc_context *dc_ctx = dmub_srv->ctx; +	enum dmub_status status = DMUB_STATUS_OK; + +	status = dmub_srv_wait_for_inbox0_ack(dmub, 100000); +	if (status != DMUB_STATUS_OK) { +		DC_ERROR("Error waiting for INBOX0 HW Lock 
Ack\n"); +		dc_dmub_srv_log_diagnostic_data(dmub_srv); +	} +} +  void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,  		union dmub_inbox0_data_register data)  {  	struct dmub_srv *dmub = dmub_srv->dmub; -	if (dmub->hw_funcs.send_inbox0_cmd) -		dmub->hw_funcs.send_inbox0_cmd(dmub, data); -	// TODO: Add wait command -- poll register for ACK +	struct dc_context *dc_ctx = dmub_srv->ctx; +	enum dmub_status status = DMUB_STATUS_OK; + +	status = dmub_srv_send_inbox0_cmd(dmub, data); +	if (status != DMUB_STATUS_OK) { +		DC_ERROR("Error sending INBOX0 cmd\n"); +		dc_dmub_srv_log_diagnostic_data(dmub_srv); +	}  }  bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 3e35eee7188c..7e4e2ec5915d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -68,6 +68,8 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu  void dc_dmub_trace_event_control(struct dc *dc, bool enable); +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv);  void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);  bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index e68e9a86a4d9..353dac420f34 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -378,7 +378,14 @@ enum dpcd_downstream_port_detailed_type {  union dwnstream_port_caps_byte2 {  	struct {  		uint8_t MAX_BITS_PER_COLOR_COMPONENT:2; +#if defined(CONFIG_DRM_AMD_DC_DCN) +		uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3; +		uint8_t SOURCE_CONTROL_MODE_SUPPORT:1; +		uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1; +		uint8_t RESERVED:1; +#else  		uint8_t RESERVED:6; +#endif  	} bits;  	uint8_t raw;  }; @@ -416,6 +423,30 @@ union dwnstream_port_caps_byte3_hdmi {  	uint8_t raw;  }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +union hdmi_sink_encoded_link_bw_support { +	struct { +		uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3; +		uint8_t RESERVED:5; +	} bits; +	uint8_t raw; +}; + +union hdmi_encoded_link_bw { +	struct { +		uint8_t FRL_MODE:1; // Bit 0 +		uint8_t BW_9Gbps:1; +		uint8_t BW_18Gbps:1; +		uint8_t BW_24Gbps:1; +		uint8_t BW_32Gbps:1; +		uint8_t BW_40Gbps:1; +		uint8_t BW_48Gbps:1; +		uint8_t RESERVED:1; // Bit 7 +	} bits; +	uint8_t raw; +}; +#endif +  /*4-byte structure for detailed capabilities of a down-stream port  (DP-to-TMDS converter).*/  union dwnstream_portxcaps { @@ -852,6 +883,15 @@ struct psr_caps {  	unsigned char psr_version;  	unsigned int psr_rfb_setup_time;  	bool psr_exit_link_training_required; +	unsigned char edp_revision; +	unsigned char support_ver; +	bool su_granularity_required; +	bool y_coordinate_required; +	uint8_t su_y_granularity; +	bool alpm_cap; +	bool standby_support; +	uint8_t rate_control_caps; +	unsigned int psr_power_opt_flag;  };  /* Length of router topology ID read from DPCD in bytes. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 52355fe6994c..eac34f591a3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -741,6 +741,9 @@ struct dc_dsc_config {  	uint32_t version_minor; /* DSC minor version. Full version is formed as 1.version_minor. */  	bool ycbcr422_simple; /* Tell DSC engine to convert YCbCr 4:2:2 to 'YCbCr 4:2:2 simple'. */  	int32_t rc_buffer_size; /* DSC RC buffer block size in bytes */ +#if defined(CONFIG_DRM_AMD_DC_DCN) +	bool is_frl; /* indicate if DSC is applied based on HDMI FRL sink's capability */ +#endif  	bool is_dp; /* indicate if DSC is applied based on DP's capability */  };  struct dc_crtc_timing { diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 180ecd860296..c0e37ad0e26c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -30,6 +30,8 @@  #include "dc_types.h"  #include "grph_object_defs.h" +struct link_resource; +  enum dc_link_fec_state {  	dc_link_fec_not_ready,  	dc_link_fec_ready, @@ -113,6 +115,7 @@ struct dc_link {  	 * DIG encoder. */  	bool is_dig_mapping_flexible;  	bool hpd_status; /* HPD status of link without physical HPD pin. */ +	bool is_hpd_pending; /* Indicates a new received hpd */  	bool edp_sink_present; @@ -159,9 +162,6 @@ struct dc_link {  	struct panel_cntl *panel_cntl;  	struct link_encoder *link_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) -	struct hpo_dp_link_encoder *hpo_dp_link_enc; -#endif  	struct graphics_object_id link_id;  	/* Endpoint type distinguishes display endpoints which do not have entries  	 * in the BIOS connector table from those that do. Helps when tracking link @@ -185,12 +185,18 @@ struct dc_link {  	/* Drive settings read from integrated info table */  	struct dc_lane_settings bios_forced_drive_settings; +	/* Vendor specific LTTPR workaround variables */ +	uint8_t vendor_specific_lttpr_link_rate_wa; +	bool apply_vendor_specific_lttpr_link_rate_wa; +  	/* MST record stream using this link */  	struct link_flags {  		bool dp_keep_receiver_powered;  		bool dp_skip_DID2;  		bool dp_skip_reset_segment;  		bool dp_mot_reset_segment; +		/* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ +		bool dpia_mst_dsc_always_on;  	} wa_flags;  	struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -224,6 +230,8 @@ static inline void get_edp_links(const struct dc *dc,  	*edp_num = 0;  	for (i = 0; i < dc->link_count; i++) {  		// report any eDP links, even unconnected DDI's +		if (!dc->links[i]) +			continue;  		if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) {  			edp_links[*edp_num] = dc->links[i];  			if (++(*edp_num) == MAX_NUM_EDP) @@ -287,6 +295,10 @@ bool dc_link_setup_psr(struct dc_link *dc_link,  void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency); +void dc_link_blank_all_dp_displays(struct dc *dc); + +void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init); +  /* Request DC to detect if there is a Panel connected.   * boot - If this call is during initial boot.   
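
The NULL guard added to get_edp_links() above matters because link slots in the array can legitimately be unpopulated. A runnable sketch of the same scan with simplified types:

#include <stddef.h>
#include <stdio.h>

#define MAX_NUM_EDP 2

enum signal { SIGNAL_NONE, SIGNAL_EDP };
struct link { enum signal connector_signal; };

static int get_edp_links(struct link **links, int count, struct link **out)
{
	int i, edp_num = 0;

	for (i = 0; i < count; i++) {
		if (!links[i])          /* guard: slots may be NULL */
			continue;
		if (links[i]->connector_signal == SIGNAL_EDP) {
			out[edp_num] = links[i];
			if (++edp_num == MAX_NUM_EDP)
				break;
		}
	}
	return edp_num;
}

int main(void)
{
	struct link edp = { SIGNAL_EDP };
	struct link *links[3] = { NULL, &edp, NULL };
	struct link *out[MAX_NUM_EDP];

	printf("%d eDP link(s)\n", get_edp_links(links, 3, out));
	return 0;
}
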
* Return false for any type of detection failure or MST detection @@ -298,7 +310,7 @@ enum dc_detect_reason {  	DETECT_REASON_HPD,  	DETECT_REASON_HPDRX,  	DETECT_REASON_FALLBACK, -	DETECT_REASON_RETRAIN +	DETECT_REASON_RETRAIN,  };  bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); @@ -346,14 +358,17 @@ void dc_link_remove_remote_sink(  void dc_link_dp_set_drive_settings(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct link_training_settings *lt_settings);  bool dc_link_dp_perform_link_training_skip_aux(  	struct dc_link *link, +	const struct link_resource *link_res,  	const struct dc_link_settings *link_setting);  enum link_training_result dc_link_dp_perform_link_training(  	struct dc_link *link, +	const struct link_resource *link_res,  	const struct dc_link_settings *link_settings,  	bool skip_video_pattern); @@ -361,6 +376,7 @@ bool dc_link_dp_sync_lt_begin(struct dc_link *link);  enum link_training_result dc_link_dp_sync_lt_attempt(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct dc_link_settings *link_setting,  	struct dc_link_training_overrides *lt_settings); @@ -438,6 +454,13 @@ bool dc_link_is_fec_supported(const struct dc_link *link);  bool dc_link_should_enable_fec(const struct dc_link *link);  #if defined(CONFIG_DRM_AMD_DC_DCN) +uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);  enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);  #endif + +const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link); +/* take a snapshot of current link resource allocation state */ +void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); +/* restore link resource allocation state from a snapshot */ +void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);  #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 388457ffc0a8..0285a4b38d05 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -430,6 +430,7 @@ struct dc_dongle_caps {  	uint32_t dp_hdmi_max_bpc;  	uint32_t dp_hdmi_max_pixel_clk_in_khz;  #if defined(CONFIG_DRM_AMD_DC_DCN) +	uint32_t dp_hdmi_frl_max_link_bw_in_kbps;  	struct dc_dongle_dfp_cap_ext dfp_cap_ext;  #endif  }; @@ -950,6 +951,7 @@ enum dc_gpu_mem_alloc_type {  enum dc_psr_version {  	DC_PSR_VERSION_1			= 0, +	DC_PSR_VERSION_SU_1			= 1,  	DC_PSR_VERSION_UNSUPPORTED		= 0xFFFFFFFF,  }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 27218ede150a..70eaac017624 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -67,9 +67,6 @@ static void write_indirect_azalia_reg(struct audio *audio,  	/* AZALIA_F0_CODEC_ENDPOINT_DATA  endpoint data  */  	REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0,  			AZALIA_ENDPOINT_REG_DATA, reg_data); - -	DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u  data: %u\n", -		reg_index, reg_data);  }  static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index) @@ -85,9 +82,6 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index  	/* AZALIA_F0_CODEC_ENDPOINT_DATA  endpoint data  */  	value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA); -	DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u  data: %u\n", -		reg_index, value); -  	return value;  } diff --git 
a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index 5622d5e32d81..dbd2cfed0603 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -113,6 +113,7 @@ struct dce_audio_shift {  	uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;  	uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;  	uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; +	uint32_t CLOCK_GATING_DISABLE;  };  struct dce_audio_mask { @@ -132,6 +133,7 @@ struct dce_audio_mask {  	uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO;  	uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO;  	uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; +	uint32_t CLOCK_GATING_DISABLE;  }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 1e77ffee71b3..f1c61d5aee6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -788,8 +788,9 @@ static bool dce110_link_encoder_validate_hdmi_output(  			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)  		return false; -	if (!enc110->base.features.flags.bits.HDMI_6GB_EN && -		adjusted_pix_clk_khz >= 300000) +	if ((!enc110->base.features.flags.bits.HDMI_6GB_EN || +			enc110->base.ctx->dc->debug.hdmi20_disable) && +			adjusted_pix_clk_khz >= 300000)  		return false;  	if (enc110->base.ctx->dc->debug.hdmi20_disable &&  		crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 9baf8ca0a920..b1b2e3c6f379 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -56,8 +56,11 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,  		union dmub_inbox0_cmd_lock_hw hw_lock_cmd)  {  	union dmub_inbox0_data_register data = { 0 }; +  	data.inbox0_cmd_lock_hw = hw_lock_cmd; +	dc_dmub_srv_clear_inbox0_ack(dmub_srv);  	dc_dmub_srv_send_inbox0_cmd(dmub_srv, data); +	dc_dmub_srv_wait_for_inbox0_ack(dmub_srv);  }  bool should_use_dmub_lock(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 90eb8eedacf2..87ed48d5530d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -230,7 +230,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_  /**   * Set PSR power optimization flags.   
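
dmub_psr_set_power_opt() below gains a panel_inst argument so the optimization flags are applied per panel rather than globally. A toy command-fill sketch; the struct is a stand-in, not the real dmub_rb_cmd layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct psr_set_power_opt_data {
	uint8_t cmd_version;
	uint8_t panel_inst;   /* new: which panel the flags apply to */
	uint32_t power_opt;
};

static void fill_cmd(struct psr_set_power_opt_data *d,
		     uint32_t power_opt, uint8_t panel_inst)
{
	memset(d, 0, sizeof(*d));
	d->cmd_version = 1;          /* analogous to CONTROL_VERSION_1 */
	d->power_opt = power_opt;
	d->panel_inst = panel_inst;  /* routes the command per panel */
}

int main(void)
{
	struct psr_set_power_opt_data d;

	fill_cmd(&d, 0x3, 0);
	printf("panel %u opt 0x%x\n", d.panel_inst, d.power_opt);
	return 0;
}
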
*/ -static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt) +static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst)  {  	union dmub_rb_cmd cmd;  	struct dc_context *dc = dmub->ctx; @@ -239,7 +239,9 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt  	cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR;  	cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT;  	cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data); +	cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;  	cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; +	cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst;  	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);  	dc_dmub_srv_cmd_execute(dc->dmub_srv); @@ -327,6 +329,16 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,  	copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us;  	copy_settings_data->cmd_version =  DMUB_CMD_PSR_CONTROL_VERSION_1;  	copy_settings_data->panel_inst = panel_inst; +	copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); + +	if (link->fec_state == dc_link_fec_enabled && +		(!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, +			sizeof(link->dpcd_caps.sink_dev_id_str)) || +		!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, +			sizeof(link->dpcd_caps.sink_dev_id_str)))) +		copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1; +	else +		copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0;  	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);  	dc_dmub_srv_cmd_execute(dc->dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h index 5dbd479660f1..01acc01cc191 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -46,7 +46,7 @@ struct dmub_psr_funcs {  	void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);  	void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,  	uint8_t panel_inst); -	void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt); +	void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst);  };  struct dmub_psr *dmub_psr_create(struct dc_context *ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 24e47df526f6..78192ecba102 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -69,6 +69,8 @@  #include "dcn10/dcn10_hw_sequencer.h" +#include "dce110_hw_sequencer.h" +  #define GAMMA_HW_POINTS_NUM 256  /* @@ -1602,6 +1604,11 @@ static enum dc_status apply_single_controller_ctx_to_hw(  			pipe_ctx->stream_res.stream_enc,  			pipe_ctx->stream_res.tg->inst); +	if (dc_is_dp_signal(pipe_ctx->stream->signal) && +		pipe_ctx->stream_res.stream_enc->funcs->reset_fifo) +		pipe_ctx->stream_res.stream_enc->funcs->reset_fifo( +			pipe_ctx->stream_res.stream_enc); +  	if (dc_is_dp_signal(pipe_ctx->stream->signal))  		dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); @@ -1655,30 +1662,12 @@ static enum dc_status apply_single_controller_ctx_to_hw(  static void power_down_encoders(struct dc *dc)  { -	int i, j; +	int i;  	for (i = 0; i < 
dc->link_count; i++) {  		enum signal_type signal = dc->links[i]->connector_signal; -		if ((signal == SIGNAL_TYPE_EDP) || -			(signal == SIGNAL_TYPE_DISPLAY_PORT)) { -			if (dc->links[i]->link_enc->funcs->get_dig_frontend && -				dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) { -				unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend( -									dc->links[i]->link_enc); - -				for (j = 0; j < dc->res_pool->stream_enc_count; j++) { -					if (fe == dc->res_pool->stream_enc[j]->id) { -						dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], -									dc->res_pool->stream_enc[j]); -						break; -					} -				} -			} - -			if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) -				dp_receiver_power_ctrl(dc->links[i], false); -		} +		dc_link_blank_dp_stream(dc->links[i], false);  		if (signal != SIGNAL_TYPE_EDP)  			signal = SIGNAL_TYPE_NONE; @@ -1805,7 +1794,6 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)  	struct dc_stream_state *edp_streams[MAX_NUM_EDP];  	struct dc_link *edp_link_with_sink = NULL;  	struct dc_link *edp_link = NULL; -	struct dc_stream_state *edp_stream = NULL;  	struct dce_hwseq *hws = dc->hwseq;  	int edp_with_sink_num;  	int edp_num; @@ -1826,27 +1814,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)  	get_edp_streams(context, edp_streams, &edp_stream_num);  	// Check fastboot support, disable on DCE8 because of blank screens -	if (edp_num && dc->ctx->dce_version != DCE_VERSION_8_0 && +	if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 &&  		    dc->ctx->dce_version != DCE_VERSION_8_1 &&  		    dc->ctx->dce_version != DCE_VERSION_8_3) {  		for (i = 0; i < edp_num; i++) {  			edp_link = edp_links[i]; +			if (edp_link != edp_streams[0]->link) +				continue;  			// enable fastboot if backend is enabled on eDP -			if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) { -				/* Set optimization flag on eDP stream*/ -				if (edp_stream_num && edp_link->link_status.link_active) { -					edp_stream = edp_streams[0]; -					can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); -					edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; -					if (can_apply_edp_fast_boot) -						DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); +			if (edp_link->link_enc->funcs->is_dig_enabled && +			    edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) && +			    edp_link->link_status.link_active) { +				struct dc_stream_state *edp_stream = edp_streams[0]; -					break; -				} +				can_apply_edp_fast_boot = !is_edp_ilr_optimization_required(edp_stream->link, &edp_stream->timing); +				edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; +				if (can_apply_edp_fast_boot) +					DC_LOG_EVENT_LINK_TRAINING("eDP fast boot disabled to optimize link rate\n"); + +				break;  			}  		}  		// We are trying to enable eDP, don't power down VDD -		if (edp_stream_num) +		if (can_apply_edp_fast_boot)  			keep_edp_vdd_on = true;  	} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 91fdfcd8a14e..db7ca4b0cdb9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -119,14 +119,6 @@ void dpp_read_state(struct dpp *dpp_base,  	}  } -/* Program gamut remap in bypass mode */ -void dpp_set_gamut_remap_bypass(struct dcn10_dpp 
*dpp) -{ -	REG_SET(CM_GAMUT_REMAP_CONTROL, 0, -			CM_GAMUT_REMAP_MODE, 0); -	/* Gamut remap in bypass */ -} -  #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))  bool dpp1_get_optimal_number_of_taps( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 44293d66b46b..f607a0e28f14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,6 +39,10 @@  #define BLACK_OFFSET_RGB_Y 0x0  #define BLACK_OFFSET_CBCR  0x8000 +#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 +#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 +#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 +  #define REG(reg)\  	dpp->tf_regs->reg @@ -85,51 +89,6 @@ enum dscl_mode_sel {  	DSCL_MODE_DSCL_BYPASS = 6  }; -static void dpp1_dscl_set_overscan( -	struct dcn10_dpp *dpp, -	const struct scaler_data *data) -{ -	uint32_t left = data->recout.x; -	uint32_t top = data->recout.y; - -	int right = data->h_active - data->recout.x - data->recout.width; -	int bottom = data->v_active - data->recout.y - data->recout.height; - -	if (right < 0) { -		BREAK_TO_DEBUGGER(); -		right = 0; -	} -	if (bottom < 0) { -		BREAK_TO_DEBUGGER(); -		bottom = 0; -	} - -	REG_SET_2(DSCL_EXT_OVERSCAN_LEFT_RIGHT, 0, -		EXT_OVERSCAN_LEFT, left, -		EXT_OVERSCAN_RIGHT, right); - -	REG_SET_2(DSCL_EXT_OVERSCAN_TOP_BOTTOM, 0, -		EXT_OVERSCAN_BOTTOM, bottom, -		EXT_OVERSCAN_TOP, top); -} - -static void dpp1_dscl_set_otg_blank( -		struct dcn10_dpp *dpp, const struct scaler_data *data) -{ -	uint32_t h_blank_start = data->h_active; -	uint32_t h_blank_end = 0; -	uint32_t v_blank_start = data->v_active; -	uint32_t v_blank_end = 0; - -	REG_SET_2(OTG_H_BLANK, 0, -			OTG_H_BLANK_START, h_blank_start, -			OTG_H_BLANK_END, h_blank_end); - -	REG_SET_2(OTG_V_BLANK, 0, -			OTG_V_BLANK_START, v_blank_start, -			OTG_V_BLANK_END, v_blank_end); -} -  static int dpp1_dscl_get_pixel_depth_val(enum lb_pixel_depth depth)  {  	if (depth == LB_PIXEL_DEPTH_30BPP) @@ -551,58 +510,6 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d  	return LB_MEMORY_CONFIG_0;  } -void dpp1_dscl_set_scaler_auto_scale( -	struct dpp *dpp_base, -	const struct scaler_data *scl_data) -{ -	enum lb_memory_config lb_config; -	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); -	enum dscl_mode_sel dscl_mode = dpp1_dscl_get_dscl_mode( -			dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale); -	bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN -				&& scl_data->format <= PIXEL_FORMAT_VIDEO_END; - -	dpp1_dscl_set_overscan(dpp, scl_data); - -	dpp1_dscl_set_otg_blank(dpp, scl_data); - -	REG_UPDATE(SCL_MODE, DSCL_MODE, dscl_mode); - -	if (dscl_mode == DSCL_MODE_DSCL_BYPASS) -		return; - -	lb_config =  dpp1_dscl_find_lb_memory_config(dpp, scl_data); -	dpp1_dscl_set_lb(dpp, &scl_data->lb_params, lb_config); - -	if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) -		return; - -	/* TODO: v_min */ -	REG_SET_3(DSCL_AUTOCAL, 0, -		AUTOCAL_MODE, AUTOCAL_MODE_AUTOSCALE, -		AUTOCAL_NUM_PIPE, 0, -		AUTOCAL_PIPE_ID, 0); - -	/* Black offsets */ -	if (ycbcr) -		REG_SET_2(SCL_BLACK_OFFSET, 0, -				SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, -				SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_CBCR); -	else - -		REG_SET_2(SCL_BLACK_OFFSET, 0, -				SCL_BLACK_OFFSET_RGB_Y, BLACK_OFFSET_RGB_Y, -				SCL_BLACK_OFFSET_CBCR, BLACK_OFFSET_RGB_Y); - -	REG_SET_4(SCL_TAP_CONTROL, 0, -		SCL_V_NUM_TAPS, scl_data->taps.v_taps - 1, -		SCL_H_NUM_TAPS, scl_data->taps.h_taps - 1, -		
SCL_V_NUM_TAPS_C, scl_data->taps.v_taps_c - 1, -		SCL_H_NUM_TAPS_C, scl_data->taps.h_taps_c - 1); - -	dpp1_dscl_set_scl_filter(dpp, scl_data, ycbcr); -} -  static void dpp1_dscl_set_manual_ratio_init(  		struct dcn10_dpp *dpp, const struct scaler_data *data) @@ -685,9 +592,17 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,  				 const struct rect *recout)  {  	int visual_confirm_on = 0; +	unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; +  	if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE)  		visual_confirm_on = 1; +	/* Check bounds to ensure the VC bar height was set to a sane value */ +	if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && +			(dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { +		visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; +	} +  	REG_SET_2(RECOUT_START, 0,  		  /* First pixel of RECOUT in the active OTG area */  		  RECOUT_START_X, recout->x, @@ -699,7 +614,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp,  		  RECOUT_WIDTH, recout->width,  		  /* Number of RECOUT vertical lines */  		  RECOUT_HEIGHT, recout->height -			 - visual_confirm_on * 2 * (dpp->base.inst + 1)); +			 - visual_confirm_on * 2 * (dpp->base.inst + visual_confirm_rect_height));  }  /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 0b788d794fb3..530a72e3eefe 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -77,9 +77,9 @@  #define PGFSM_POWER_ON 0  #define PGFSM_POWER_OFF 2 -void print_microsec(struct dc_context *dc_ctx, -	struct dc_log_buffer_ctx *log_ctx, -	uint32_t ref_cycle) +static void print_microsec(struct dc_context *dc_ctx, +			   struct dc_log_buffer_ctx *log_ctx, +			   uint32_t ref_cycle)  {  	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;  	static const unsigned int frac = 1000; @@ -132,7 +132,8 @@ static void log_mpc_crc(struct dc *dc,  		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));  } -void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx) +static void dcn10_log_hubbub_state(struct dc *dc, +				   struct dc_log_buffer_ctx *log_ctx)  {  	struct dc_context *dc_ctx = dc->ctx;  	struct dcn_hubbub_wm wm; @@ -467,8 +468,6 @@ void dcn10_log_hw_state(struct dc *dc,  	log_mpc_crc(dc, log_ctx);  	{ -		int hpo_dp_link_enc_count = 0; -  		if (pool->hpo_dp_stream_enc_count > 0) {  			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");  			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) { @@ -499,18 +498,14 @@ void dcn10_log_hw_state(struct dc *dc,  		}  		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */ -		for (i = 0; i < dc->link_count; i++) -			if (dc->links[i]->hpo_dp_link_enc) -				hpo_dp_link_enc_count++; - -		if (hpo_dp_link_enc_count) { +		if (pool->hpo_dp_link_enc_count) {  			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n"); -			for (i = 0; i < dc->link_count; i++) { -				struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc; +			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) { +				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];  				struct hpo_dp_link_enc_state hpo_dp_le_state = {0}; -		
		if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) { +				if (hpo_dp_link_enc->funcs->read_state) {  					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);  					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",  							hpo_dp_link_enc->inst, @@ -1362,11 +1357,53 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)  		tg->funcs->tg_init(tg);  	} + +	/* Power gate DSCs */ +	if (hws->funcs.dsc_pg_control != NULL) { +		uint32_t num_opps = 0; +		uint32_t opp_id_src0 = OPP_ID_INVALID; +		uint32_t opp_id_src1 = OPP_ID_INVALID; + +		// Step 1: To find out which OPTC is running & OPTC DSC is ON +		// We can't use res_pool->res_cap->num_timing_generator to check +		// Because it records display pipes default setting built in driver, +		// not display pipes of the current chip. +		// Some ASICs would be fused display pipes less than the default setting. +		// In dcnxx_resource_construct function, driver would obatin real information. +		for (i = 0; i < dc->res_pool->timing_generator_count; i++) { +			uint32_t optc_dsc_state = 0; +			struct timing_generator *tg = dc->res_pool->timing_generators[i]; + +			if (tg->funcs->is_tg_enabled(tg)) { +				if (tg->funcs->get_dsc_status) +					tg->funcs->get_dsc_status(tg, &optc_dsc_state); +				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block. +				// non-zero value is DSC enabled +				if (optc_dsc_state != 0) { +					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); +					break; +				} +			} +		} + +		// Step 2: To power down DSC but skip DSC  of running OPTC +		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { +			struct dcn_dsc_state s  = {0}; + +			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); + +			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && +				s.dsc_clock_en && s.dsc_fw_en) +				continue; + +			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false); +		} +	}  }  void dcn10_init_hw(struct dc *dc)  { -	int i, j; +	int i;  	struct abm *abm = dc->res_pool->abm;  	struct dmcu *dmcu = dc->res_pool->dmcu;  	struct dce_hwseq *hws = dc->hwseq; @@ -1468,43 +1505,8 @@ void dcn10_init_hw(struct dc *dc)  		dmub_enable_outbox_notification(dc);  	/* we want to turn off all dp displays before doing detection */ -	if (dc->config.power_down_display_on_boot) { -		uint8_t dpcd_power_state = '\0'; -		enum dc_status status = DC_ERROR_UNEXPECTED; - -		for (i = 0; i < dc->link_count; i++) { -			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) -				continue; - -			/* DP 2.0 requires that LTTPR Caps be read first */ -			dp_retrieve_lttpr_cap(dc->links[i]); - -			/* -			 * If any of the displays are lit up turn them off. -			 * The reason is that some MST hubs cannot be turned off -			 * completely until we tell them to do so. -			 * If not turned off, then displays connected to MST hub -			 * won't light up. 
-			 */ -			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, -							&dpcd_power_state, sizeof(dpcd_power_state)); -			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { -				/* blank dp stream before power off receiver*/ -				if (dc->links[i]->link_enc->funcs->get_dig_frontend) { -					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc); - -					for (j = 0; j < dc->res_pool->stream_enc_count; j++) { -						if (fe == dc->res_pool->stream_enc[j]->id) { -							dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], -										dc->res_pool->stream_enc[j]); -							break; -						} -					} -				} -				dp_receiver_power_ctrl(dc->links[i], false); -			} -		} -	} +	if (dc->config.power_down_display_on_boot) +		dc_link_blank_all_dp_displays(dc);  	/* If taking control over from VBIOS, we may want to optimize our first  	 * mode set, so we need to skip powering down pipes until we know which @@ -1637,7 +1639,7 @@ void dcn10_reset_hw_ctx_wrap(  			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);  			if (hws->funcs.enable_stream_gating) -				hws->funcs.enable_stream_gating(dc, pipe_ctx); +				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);  			if (old_clk)  				old_clk->funcs->cs_power_down(old_clk);  		} @@ -1970,10 +1972,9 @@ static bool wait_for_reset_trigger_to_occur(  	return rc;  } -uint64_t reduceSizeAndFraction( -	uint64_t *numerator, -	uint64_t *denominator, -	bool checkUint32Bounary) +static uint64_t reduceSizeAndFraction(uint64_t *numerator, +				      uint64_t *denominator, +				      bool checkUint32Bounary)  {  	int i;  	bool ret = checkUint32Bounary == false; @@ -2021,7 +2022,7 @@ uint64_t reduceSizeAndFraction(  	return ret;  } -bool is_low_refresh_rate(struct pipe_ctx *pipe) +static bool is_low_refresh_rate(struct pipe_ctx *pipe)  {  	uint32_t master_pipe_refresh_rate =  		pipe->stream->timing.pix_clk_100hz * 100 / @@ -2030,7 +2031,8 @@ bool is_low_refresh_rate(struct pipe_ctx *pipe)  	return master_pipe_refresh_rate <= 30;  } -uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate) +static uint8_t get_clock_divider(struct pipe_ctx *pipe, +				 bool account_low_refresh_rate)  {  	uint32_t clock_divider = 1;  	uint32_t numpipes = 1; @@ -2050,10 +2052,8 @@ uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)  	return clock_divider;  } -int dcn10_align_pixel_clocks( -	struct dc *dc, -	int group_size, -	struct pipe_ctx *grouped_pipes[]) +static int dcn10_align_pixel_clocks(struct dc *dc, int group_size, +				    struct pipe_ctx *grouped_pipes[])  {  	struct dc_context *dc_ctx = dc->ctx;  	int i, master = -1, embedded = -1; @@ -2342,7 +2342,7 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,  } -void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp) +static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)  {  	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);  	struct vm_system_aperture_param apt = {0}; @@ -2624,7 +2624,7 @@ static void dcn10_update_dchubp_dpp(  		/* new calculated dispclk, dppclk are stored in  		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current  		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz. -		 * dcn_validate_bandwidth compute new dispclk, dppclk. +		 * dcn10_validate_bandwidth compute new dispclk, dppclk.  		 * dispclk will put in use after optimize_bandwidth when  		 * ramp_up_dispclk_with_dpp is called.  		 
* there are two places for dppclk be put in use. One location @@ -2638,7 +2638,7 @@ static void dcn10_update_dchubp_dpp(  		 * for example, eDP + external dp,  change resolution of DP from  		 * 1920x1080x144hz to 1280x960x60hz.  		 * before change: dispclk = 337889 dppclk = 337889 -		 * change mode, dcn_validate_bandwidth calculate +		 * change mode, dcn10_validate_bandwidth calculate  		 *                dispclk = 143122 dppclk = 143122  		 * update_dchubp_dpp be executed before dispclk be updated,  		 * dispclk = 337889, but dppclk use new value dispclk /2 = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c index 34001a30d449..10e613ec7d24 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c @@ -78,6 +78,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {  	.get_clock = dcn10_get_clock,  	.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,  	.calc_vupdate_position = dcn10_calc_vupdate_position, +	.power_down = dce110_power_down,  	.set_backlight_level = dce110_set_backlight_level,  	.set_abm_immediate_disable = dce110_set_abm_immediate_disable,  	.set_pipe = dce110_set_pipe, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 2dc4b4e4ba02..f4b34c110eae 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -646,8 +646,9 @@ static bool dcn10_link_encoder_validate_hdmi_output(  			crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)  		return false; -	if (!enc10->base.features.flags.bits.HDMI_6GB_EN && -		adjusted_pix_clk_100hz >= 3000000) +	if ((!enc10->base.features.flags.bits.HDMI_6GB_EN || +			enc10->base.ctx->dc->debug.hdmi20_disable) && +			adjusted_pix_clk_100hz >= 3000000)  		return false;  	if (enc10->base.ctx->dc->debug.hdmi20_disable &&  		crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index d54d731415d7..2c409356f512 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -348,36 +348,6 @@ void opp1_program_stereo(  	*/  } -void opp1_program_oppbuf( -	struct output_pixel_processor *opp, -	struct oppbuf_params *oppbuf) -{ -	struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); - -	/* Program the oppbuf active width to be the frame width from mpc */ -	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_ACTIVE_WIDTH, oppbuf->active_width); - -	/* Specifies the number of segments in multi-segment mode (DP-MSO operation) -	 * description  "In 1/2/4 segment mode, specifies the horizontal active width in pixels of the display panel. -	 * In 4 segment split left/right mode, specifies the horizontal 1/2 active width in pixels of the display panel. -	 * Used to determine segment boundaries in multi-segment mode. Used to determine the width of the vertical active space in 3D frame packed modes. -	 * OPPBUF_ACTIVE_WIDTH must be integer divisible by the total number of segments." 
-	 */ -	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_DISPLAY_SEGMENTATION, oppbuf->mso_segmentation); - -	/* description  "Specifies the number of overlap pixels (1-8 overlapping pixels supported), used in multi-segment mode (DP-MSO operation)" */ -	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_OVERLAP_PIXEL_NUM, oppbuf->mso_overlap_pixel_num); - -	/* description  "Specifies the number of times a pixel is replicated (0-15 pixel replications supported). -	 * A value of 0 disables replication. The total number of times a pixel is output is OPPBUF_PIXEL_REPETITION + 1." -	 */ -	REG_UPDATE(OPPBUF_CONTROL, OPPBUF_PIXEL_REPETITION, oppbuf->pixel_repetition); - -	/* Controls the number of padded pixels at the end of a segment */ -	if (REG(OPPBUF_CONTROL1)) -		REG_UPDATE(OPPBUF_CONTROL1, OPPBUF_NUM_SEGMENT_PADDED_PIXELS, oppbuf->num_segment_padded_pixels); -} -  void opp1_pipe_clock_control(struct output_pixel_processor *opp, bool enable)  {  	struct dcn10_opp *oppn10 = TO_DCN10_OPP(opp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 3d2a2848857a..b1671b00ce40 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -132,22 +132,6 @@ void optc1_setup_vertical_interrupt2(  }  /** - * Vupdate keepout can be set to a window to block the update lock for that pipe from changing. - * Start offset begins with vstartup and goes for x number of clocks, - * end offset starts from end of vupdate to x number of clocks. - */ -void optc1_set_vupdate_keepout(struct timing_generator *optc, -			       struct vupdate_keepout_params *params) -{ -	struct optc *optc1 = DCN10TG_FROM_TG(optc); - -	REG_SET_3(OTG_VUPDATE_KEEPOUT, 0, -		  MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, params->start_offset, -		  MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, params->end_offset, -		  OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, params->enable); -} - -/**   * program_timing_generator   used by mode timing set   * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition.   * Including SYNC. Call BIOS command table to program Timings. 
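
[Editor's note] The dcn10_link_encoder_validate_hdmi_output hunk above tightens the pixel-clock gate: where only a missing HDMI_6GB_EN capability used to cap TMDS at 300 MHz, the hdmi20_disable debug flag now forces the same HDMI 1.4 ceiling. A minimal standalone sketch of the predicate follows; the helper name and parameters are illustrative stand-ins, not part of the patch.

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative helper (not in the patch): mirrors the updated check in
 * dcn10_link_encoder_validate_hdmi_output(). Clocks are in 100 Hz units,
 * so 3000000 corresponds to the 300 MHz HDMI 1.4 TMDS limit.
 */
static bool hdmi_pix_clk_allowed(bool hdmi_6gb_en, bool hdmi20_disable,
				 uint32_t adjusted_pix_clk_100hz)
{
	/*
	 * 6 GHz operation is unavailable if the encoder lacks the
	 * capability or the hdmi20_disable debug option is set; either
	 * way, reject anything at or above the HDMI 1.4 ceiling.
	 */
	if ((!hdmi_6gb_en || hdmi20_disable) &&
	    adjusted_pix_clk_100hz >= 3000000)
		return false;

	return true;
}

With hdmi20_disable set, a 4K@60 timing (about 594 MHz TMDS, 5940000 in these units) is rejected, while 1080p@60 (about 148.5 MHz, 1485000) still passes.
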
@@ -876,7 +860,7 @@ void optc1_set_static_screen_control(  			OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);  } -void optc1_setup_manual_trigger(struct timing_generator *optc) +static void optc1_setup_manual_trigger(struct timing_generator *optc)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -894,7 +878,7 @@ void optc1_setup_manual_trigger(struct timing_generator *optc)  			OTG_TRIGA_CLEAR, 1);  } -void optc1_program_manual_trigger(struct timing_generator *optc) +static void optc1_program_manual_trigger(struct timing_generator *optc)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index f37551e00023..858b72149897 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -686,9 +686,8 @@ static struct output_pixel_processor *dcn10_opp_create(  	return &opp->base;  } -struct dce_aux *dcn10_aux_engine_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dce_aux *dcn10_aux_engine_create(struct dc_context *ctx, +					       uint32_t inst)  {  	struct aux_engine_dce110 *aux_engine =  		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); @@ -724,9 +723,8 @@ static const struct dce_i2c_mask i2c_masks = {  		I2C_COMMON_MASK_SH_LIST_DCE110(_MASK)  }; -struct dce_i2c_hw *dcn10_i2c_hw_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dce_i2c_hw *dcn10_i2c_hw_create(struct dc_context *ctx, +					      uint32_t inst)  {  	struct dce_i2c_hw *dce_i2c_hw =  		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -805,7 +803,7 @@ static const struct encoder_feature_support link_enc_feature = {  		.flags.bits.IS_TPS4_CAPABLE = true  }; -struct link_encoder *dcn10_link_encoder_create( +static struct link_encoder *dcn10_link_encoder_create(  	const struct encoder_init_data *enc_init_data)  {  	struct dcn10_link_encoder *enc10 = @@ -847,7 +845,7 @@ static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_d  	return &panel_cntl->base;  } -struct clock_source *dcn10_clock_source_create( +static struct clock_source *dcn10_clock_source_create(  	struct dc_context *ctx,  	struct dc_bios *bios,  	enum clock_source_id id, @@ -945,7 +943,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = {  	.create_hwseq = dcn10_hwseq_create,  }; -void dcn10_clock_source_destroy(struct clock_source **clk_src) +static void dcn10_clock_source_destroy(struct clock_source **clk_src)  {  	kfree(TO_DCE110_CLK_SRC(*clk_src));  	*clk_src = NULL; @@ -978,10 +976,8 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)  		pool->base.mpc = NULL;  	} -	if (pool->base.hubbub != NULL) { -		kfree(pool->base.hubbub); -		pool->base.hubbub = NULL; -	} +	kfree(pool->base.hubbub); +	pool->base.hubbub = NULL;  	for (i = 0; i < pool->base.pipe_count; i++) {  		if (pool->base.opps[i] != NULL) @@ -1011,14 +1007,10 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool)  	for (i = 0; i < pool->base.res_cap->num_ddc; i++) {  		if (pool->base.engines[i] != NULL)  			dce110_engine_destroy(&pool->base.engines[i]); -		if (pool->base.hw_i2cs[i] != NULL) { -			kfree(pool->base.hw_i2cs[i]); -			pool->base.hw_i2cs[i] = NULL; -		} -		if (pool->base.sw_i2cs[i] != NULL) { -			kfree(pool->base.sw_i2cs[i]); -			pool->base.sw_i2cs[i] = NULL; -		} +		kfree(pool->base.hw_i2cs[i]); +		pool->base.hw_i2cs[i] = NULL; +		kfree(pool->base.sw_i2cs[i]); +		pool->base.sw_i2cs[i] = 
NULL;  	}  	for (i = 0; i < pool->base.audio_count; i++) { @@ -1128,7 +1120,7 @@ static enum dc_status build_mapped_resource(  	return DC_OK;  } -enum dc_status dcn10_add_stream_to_ctx( +static enum dc_status dcn10_add_stream_to_ctx(  		struct dc *dc,  		struct dc_state *new_ctx,  		struct dc_stream_state *dc_stream) @@ -1320,7 +1312,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {  	.destroy = dcn10_destroy_resource_pool,  	.link_enc_create = dcn10_link_encoder_create,  	.panel_cntl_create = dcn10_panel_cntl_create, -	.validate_bandwidth = dcn_validate_bandwidth, +	.validate_bandwidth = dcn10_validate_bandwidth,  	.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,  	.validate_plane = dcn10_validate_plane,  	.validate_global = dcn10_validate_global, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index b0c08ee6bc2c..bf4436d7aaab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -902,6 +902,19 @@ void enc1_stream_encoder_stop_dp_info_packets(  } +void enc1_stream_encoder_reset_fifo( +	struct stream_encoder *enc) +{ +	struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + +	/* set DIG_START to 0x1 to reset FIFO */ +	REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); +	udelay(100); + +	/* write 0 to take the FIFO out of reset */ +	REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); +} +  void enc1_stream_encoder_dp_blank(  	struct dc_link *link,  	struct stream_encoder *enc) @@ -1587,6 +1600,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {  		enc1_stream_encoder_send_immediate_sdp_message,  	.stop_dp_info_packets =  		enc1_stream_encoder_stop_dp_info_packets, +	.reset_fifo = +		enc1_stream_encoder_reset_fifo,  	.dp_blank =  		enc1_stream_encoder_dp_blank,  	.dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 687d7e4bf7ca..a146a41f68e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -626,6 +626,9 @@ void enc1_stream_encoder_send_immediate_sdp_message(  void enc1_stream_encoder_stop_dp_info_packets(  	struct stream_encoder *enc); +void enc1_stream_encoder_reset_fifo( +	struct stream_encoder *enc); +  void enc1_stream_encoder_dp_blank(  	struct dc_link *link,  	struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index a9e420c7d75a..970b65efeac1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -251,20 +251,6 @@ static void dpp2_cnv_setup (  } -void dpp2_cnv_set_bias_scale( -		struct dpp *dpp_base, -		struct  dc_bias_and_scale *bias_and_scale) -{ -	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); - -	REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red); -	REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green); -	REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue); -	REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red); -	REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green); -	REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue); -} -  /*compute the maximum number of lines that we can fit in the line buffer*/  void 
dscl2_calc_lb_num_partitions(  		const struct scaler_data *scl_data, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 79b640e202eb..ef5c4c0f4d6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -162,6 +162,8 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds  	REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width);  	REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height);  	REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset); +	REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en, +		DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source);  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c index 880954ac0b02..994fb732a7cb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c @@ -527,7 +527,7 @@ static const uint16_t filter_12tap_16p_183[108] = {  	0, 84, 16328, 16032, 416, 1944, 1944, 416, 16032, 16328, 84, 0,  }; -const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio) +static const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)  {  	if (ratio.value < dc_fixpt_one.value)  		return filter_3tap_16p_upscale; @@ -539,7 +539,7 @@ const uint16_t *wbscl_get_filter_3tap_16p(struct fixed31_32 ratio)  		return filter_3tap_16p_183;  } -const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio) +static const uint16_t *wbscl_get_filter_4tap_16p(struct fixed31_32 ratio)  {  	if (ratio.value < dc_fixpt_one.value)  		return filter_4tap_16p_upscale; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 5adf42a7cc27..dc1752e9f461 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -192,9 +192,8 @@ void hubp2_vready_at_or_After_vsync(struct hubp *hubp,  	REG_UPDATE(DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, value);  } -void hubp2_program_requestor( -		struct hubp *hubp, -		struct _vcs_dpi_display_rq_regs_st *rq_regs) +static void hubp2_program_requestor(struct hubp *hubp, +				    struct _vcs_dpi_display_rq_regs_st *rq_regs)  {  	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); @@ -930,6 +929,16 @@ bool hubp2_is_flip_pending(struct hubp *hubp)  void hubp2_set_blank(struct hubp *hubp, bool blank)  { +	hubp2_set_blank_regs(hubp, blank); + +	if (blank) { +		hubp->mpcc_id = 0xf; +		hubp->opp_id = OPP_ID_INVALID; +	} +} + +void hubp2_set_blank_regs(struct hubp *hubp, bool blank) +{  	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);  	uint32_t blank_en = blank ? 
1 : 0; @@ -951,9 +960,6 @@ void hubp2_set_blank(struct hubp *hubp, bool blank)  					HUBP_NO_OUTSTANDING_REQ, 1,  					1, 200);  		} - -		hubp->mpcc_id = 0xf; -		hubp->opp_id = OPP_ID_INVALID;  	}  } @@ -1285,7 +1291,7 @@ void hubp2_read_state(struct hubp *hubp)  } -void hubp2_validate_dml_output(struct hubp *hubp, +static void hubp2_validate_dml_output(struct hubp *hubp,  		struct dc_context *ctx,  		struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,  		struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, @@ -1603,6 +1609,7 @@ static struct hubp_funcs dcn20_hubp_funcs = {  	.hubp_setup_interdependent = hubp2_setup_interdependent,  	.hubp_set_vm_system_aperture_settings = hubp2_set_vm_system_aperture_settings,  	.set_blank = hubp2_set_blank, +	.set_blank_regs = hubp2_set_blank_regs,  	.dcc_control = hubp2_dcc_control,  	.mem_program_viewport = min_set_viewport,  	.set_cursor_attributes	= hubp2_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h index eea2254b15e4..9204c3ef323b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.h @@ -330,6 +330,7 @@ void hubp2_program_surface_config(  bool hubp2_is_flip_pending(struct hubp *hubp);  void hubp2_set_blank(struct hubp *hubp, bool blank); +void hubp2_set_blank_regs(struct hubp *hubp, bool blank);  void hubp2_cursor_set_position(  		struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4f88376a118f..4991e93e5308 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -615,6 +615,11 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)  					pipe_ctx->pipe_idx);  } +void dcn20_disable_pixel_data(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank) +{ +	dcn20_blank_pixel_data(dc, pipe_ctx, blank); +} +  static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,  		int opp_cnt)  { @@ -1080,10 +1085,8 @@ static void dcn20_power_on_plane(  	}  } -void dcn20_enable_plane( -	struct dc *dc, -	struct pipe_ctx *pipe_ctx, -	struct dc_state *context) +static void dcn20_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx, +			       struct dc_state *context)  {  	//if (dc->debug.sanity_checks) {  	//	dcn10_verify_allow_pstate_change_high(dc); @@ -1842,6 +1845,11 @@ void dcn20_optimize_bandwidth(  					dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,  					true); +	if (dc->clk_mgr->dc_mode_softmax_enabled) +		if (dc->clk_mgr->clks.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && +				context->bw_ctx.bw.dcn.clk.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) +			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->dc_mode_softmax_memclk); +  	dc->clk_mgr->funcs->update_clocks(  			dc->clk_mgr,  			context, @@ -2270,7 +2278,7 @@ void dcn20_reset_hw_ctx_wrap(  			dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);  			if (hws->funcs.enable_stream_gating) -				hws->funcs.enable_stream_gating(dc, pipe_ctx); +				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);  			if (old_clk)  				old_clk->funcs->cs_power_down(old_clk);  		} @@ -2406,7 +2414,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)  		pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(  				pipe_ctx->stream_res.hpo_dp_stream_enc,  				
pipe_ctx->stream_res.hpo_dp_stream_enc->inst, -				link->hpo_dp_link_enc->inst); +				pipe_ctx->link_res.hpo_dp_link_enc->inst);  	}  	if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 6bba191cd33e..33a36c02b2f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -53,6 +53,10 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);  void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,  		struct dc_link_settings *link_settings);  void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_disable_pixel_data( +		struct dc *dc, +		struct pipe_ctx *pipe_ctx, +		bool blank);  void dcn20_blank_pixel_data(  		struct dc *dc,  		struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 5cfd4b0afea5..91e4885b743e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -27,6 +27,8 @@  #include "dcn10/dcn10_hw_sequencer.h"  #include "dcn20_hwseq.h" +#include "dcn20_init.h" +  static const struct hw_sequencer_funcs dcn20_funcs = {  	.program_gamut_remap = dcn10_program_gamut_remap,  	.init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c index 947eb0df3f12..15734db0cdea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c @@ -400,10 +400,9 @@ static void mpc20_program_ogam_pwl(  } -void apply_DEDCN20_305_wa( -		struct mpc *mpc, -		int mpcc_id, enum dc_lut_mode current_mode, -		enum dc_lut_mode next_mode) +static void apply_DEDCN20_305_wa(struct mpc *mpc, int mpcc_id, +				 enum dc_lut_mode current_mode, +				 enum dc_lut_mode next_mode)  {  	struct dcn20_mpc *mpc20 = TO_DCN20_MPC(mpc); @@ -525,7 +524,7 @@ static void mpc2_init_mpcc(struct mpcc *mpcc, int mpcc_inst)  	mpcc->sm_cfg.enable = false;  } -struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id) +static struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)  {  	struct mpcc *tmp_mpcc = tree->opp_list; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index c90b8516dcc1..0340fdd3f5fb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -73,21 +73,6 @@ bool optc2_enable_crtc(struct timing_generator *optc)  }  /** - * DRR double buffering control to select buffer point - * for V_TOTAL, H_TOTAL, VTOTAL_MIN, VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers - * Options: anytime, start of frame, dp start of frame (range timing) - */ -void optc2_set_timing_db_mode(struct timing_generator *optc, bool enable) -{ -	struct optc *optc1 = DCN10TG_FROM_TG(optc); - -	uint32_t blank_data_double_buffer_enable = enable ? 
1 : 0; - -	REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, -		OTG_RANGE_TIMING_DBUF_UPDATE_MODE, blank_data_double_buffer_enable); -} - -/**   *For the below, I'm not sure how your GSL parameters are stored in your env,   * so I will assume a gsl_params struct for now   */ @@ -110,30 +95,6 @@ void optc2_set_gsl(struct timing_generator *optc,  } -/* Use the gsl allow flip as the master update lock */ -void optc2_use_gsl_as_master_update_lock(struct timing_generator *optc, -		   const struct gsl_params *params) -{ -	struct optc *optc1 = DCN10TG_FROM_TG(optc); - -	REG_UPDATE(OTG_GSL_CONTROL, -		OTG_MASTER_UPDATE_LOCK_GSL_EN, params->master_update_lock_gsl_en); -} - -/* You can control the GSL timing by limiting GSL to a window (X,Y) */ -void optc2_set_gsl_window(struct timing_generator *optc, -		   const struct gsl_params *params) -{ -	struct optc *optc1 = DCN10TG_FROM_TG(optc); - -	REG_SET_2(OTG_GSL_WINDOW_X, 0, -		OTG_GSL_WINDOW_START_X, params->gsl_window_start_x, -		OTG_GSL_WINDOW_END_X, params->gsl_window_end_x); -	REG_SET_2(OTG_GSL_WINDOW_Y, 0, -		OTG_GSL_WINDOW_START_Y, params->gsl_window_start_y, -		OTG_GSL_WINDOW_END_Y, params->gsl_window_end_y); -} -  void optc2_set_gsl_source_select(  		struct timing_generator *optc,  		int group_idx, @@ -156,18 +117,6 @@ void optc2_set_gsl_source_select(  	}  } -/* DSC encoder frame start controls: x = h position, line_num = # of lines from vstartup */ -void optc2_set_dsc_encoder_frame_start(struct timing_generator *optc, -					int x_position, -					int line_num) -{ -	struct optc *optc1 = DCN10TG_FROM_TG(optc); - -	REG_SET_2(OTG_DSC_START_POSITION, 0, -			OTG_DSC_START_POSITION_X, x_position, -			OTG_DSC_START_POSITION_LINE_NUM, line_num); -} -  /* Set DSC-related configuration.   *   dsc_mode: 0 disables DSC, other values enable DSC in specified format   *   sc_bytes_per_pixel: Bytes per pixel in u3.28 format @@ -190,6 +139,19 @@ void optc2_set_dsc_config(struct timing_generator *optc,  		OPTC_DSC_SLICE_WIDTH, dsc_slice_width);  } +/* Get DSC-related configuration. 
+ *   dsc_mode: 0 disables DSC, other values enable DSC in specified format + */ +void optc2_get_dsc_status(struct timing_generator *optc, +					uint32_t *dsc_mode) +{ +	struct optc *optc1 = DCN10TG_FROM_TG(optc); + +	REG_GET(OPTC_DATA_FORMAT_CONTROL, +		OPTC_DSC_MODE, dsc_mode); +} + +  /*TEMP: Need to figure out inheritance model here.*/  bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)  { @@ -280,8 +242,8 @@ void optc2_get_optc_source(struct timing_generator *optc,  		*num_of_src_opp = 1;  } -void optc2_set_dwb_source(struct timing_generator *optc, -		uint32_t dwb_pipe_inst) +static void optc2_set_dwb_source(struct timing_generator *optc, +				 uint32_t dwb_pipe_inst)  {  	struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -293,7 +255,7 @@ void optc2_set_dwb_source(struct timing_generator *optc,  				OPTC_DWB1_SOURCE_SELECT, optc->inst);  } -void optc2_align_vblanks( +static void optc2_align_vblanks(  	struct timing_generator *optc_master,  	struct timing_generator *optc_slave,  	uint32_t master_pixel_clock_100Hz, @@ -579,6 +541,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {  		.get_crc = optc1_get_crc,  		.configure_crc = optc2_configure_crc,  		.set_dsc_config = optc2_set_dsc_config, +		.get_dsc_status = optc2_get_dsc_status,  		.set_dwb_source = optc2_set_dwb_source,  		.set_odm_bypass = optc2_set_odm_bypass,  		.set_odm_combine = optc2_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index be19a6885fbf..f7968b9ca16e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -98,6 +98,9 @@ void optc2_set_dsc_config(struct timing_generator *optc,  					uint32_t dsc_bytes_per_pixel,  					uint32_t dsc_slice_width); +void optc2_get_dsc_status(struct timing_generator *optc, +					uint32_t *dsc_mode); +  void optc2_set_odm_bypass(struct timing_generator *optc,  		const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 3883f918b3bb..2bc93df023ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {  		.timing_trace = false,  		.clock_trace = true,  		.disable_pplib_clock_request = true, -		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, +		.pipe_split_policy = MPC_SPLIT_DYNAMIC,  		.force_single_disp_pipe_split = false,  		.disable_dcc = DCC_ENABLE,  		.vsr_support = true, @@ -3093,8 +3093,7 @@ static enum dcn_zstate_support_state  decide_zstate_support(struct dc *dc, struc  	else if (context->stream_count == 1 &&  context->streams[0]->signal == SIGNAL_TYPE_EDP) {  		struct dc_link *link = context->streams[0]->sink->link; -		if ((link->link_index == 0 && link->psr_settings.psr_feature_enabled) -				|| context->bw_ctx.dml.vba.StutterPeriod > 5000.0) +		if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)  			return DCN_ZSTATE_SUPPORT_ALLOW;  		else  			return DCN_ZSTATE_SUPPORT_DISALLOW; @@ -3796,6 +3795,8 @@ static bool dcn20_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	dc->caps.hdmi_frl_pcon_support = true; +  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {  		dc->debug = debug_defaults_drv;  	} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) 
{ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index aab25ca8343a..8a70f92795c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -593,6 +593,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {  		enc1_stream_encoder_send_immediate_sdp_message,  	.stop_dp_info_packets =  		enc1_stream_encoder_stop_dp_info_packets, +	.reset_fifo = +		enc1_stream_encoder_reset_fifo,  	.dp_blank =  		enc1_stream_encoder_dp_blank,  	.dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c index f5bf04f7da25..9a3402148fde 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c @@ -44,7 +44,8 @@  #define DC_LOGGER \  	dccg->ctx->logger -void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) +static void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, +				   int req_dppclk)  {  	/* vbios handles it */  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c index 6b6f74d4afd1..35dd4bac242a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c @@ -55,7 +55,7 @@ static void hubp201_program_surface_config(  	hubp1_program_pixel_format(hubp, format);  } -void hubp201_program_deadline( +static void hubp201_program_deadline(  		struct hubp *hubp,  		struct _vcs_dpi_display_dlg_regs_st *dlg_attr,  		struct _vcs_dpi_display_ttu_regs_st *ttu_attr) @@ -63,9 +63,8 @@ void hubp201_program_deadline(  	hubp1_program_deadline(hubp, dlg_attr, ttu_attr);  } -void hubp201_program_requestor( -		struct hubp *hubp, -		struct _vcs_dpi_display_rq_regs_st *rq_regs) +static void hubp201_program_requestor(struct hubp *hubp, +				      struct _vcs_dpi_display_rq_regs_st *rq_regs)  {  	struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c index cfd09b3f705e..fe22530242d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c @@ -134,11 +134,12 @@ void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)  	PHYSICAL_ADDRESS_LOC addr;  	struct dc_plane_state *plane_state = pipe_ctx->plane_state;  	struct dce_hwseq *hws = dc->hwseq; -	struct dc_plane_address uma = plane_state->address; +	struct dc_plane_address uma;  	if (plane_state == NULL)  		return; +	uma = plane_state->address;  	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);  	plane_address_in_gpu_space_to_uma(hws, &uma); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c index a65e8f7801db..7f9ec59ef443 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c @@ -50,8 +50,8 @@  #define IND_REG(index) \  	(enc10->link_regs->index) -void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, -	struct dc_link_settings *link_settings) +static void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc, +						 struct dc_link_settings *link_settings)  {  	uint32_t value1, value2;  	struct 
dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -66,7 +66,7 @@ void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,  	}  } -bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc) +static bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)  {  	uint32_t value;  	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c index 0fa381088d1d..0bb7d3dd53fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c @@ -603,7 +603,7 @@ static const struct dc_debug_options debug_defaults_drv = {  		.timing_trace = false,  		.clock_trace = true,  		.disable_pplib_clock_request = true, -		.pipe_split_policy = MPC_SPLIT_AVOID, +		.pipe_split_policy = MPC_SPLIT_DYNAMIC,  		.force_single_disp_pipe_split = false,  		.disable_dcc = DCC_ENABLE,  		.vsr_support = true, @@ -672,9 +672,8 @@ static struct output_pixel_processor *dcn201_opp_create(  	return &opp->base;  } -struct dce_aux *dcn201_aux_engine_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dce_aux *dcn201_aux_engine_create(struct dc_context *ctx, +						uint32_t inst)  {  	struct aux_engine_dce110 *aux_engine =  		kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC); @@ -706,9 +705,8 @@ static const struct dce_i2c_mask i2c_masks = {  		I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)  }; -struct dce_i2c_hw *dcn201_i2c_hw_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dce_i2c_hw *dcn201_i2c_hw_create(struct dc_context *ctx, +					       uint32_t inst)  {  	struct dce_i2c_hw *dce_i2c_hw =  		kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC); @@ -789,7 +787,7 @@ static const struct encoder_feature_support link_enc_feature = {  		.flags.bits.IS_TPS4_CAPABLE = true  }; -struct link_encoder *dcn201_link_encoder_create( +static struct link_encoder *dcn201_link_encoder_create(  	const struct encoder_init_data *enc_init_data)  {  	struct dcn20_link_encoder *enc20 = @@ -811,7 +809,7 @@ struct link_encoder *dcn201_link_encoder_create(  	return &enc10->base;  } -struct clock_source *dcn201_clock_source_create( +static struct clock_source *dcn201_clock_source_create(  	struct dc_context *ctx,  	struct dc_bios *bios,  	enum clock_source_id id, @@ -906,7 +904,7 @@ static const struct resource_create_funcs res_create_maximus_funcs = {  	.create_hwseq = dcn201_hwseq_create,  }; -void dcn201_clock_source_destroy(struct clock_source **clk_src) +static void dcn201_clock_source_destroy(struct clock_source **clk_src)  {  	kfree(TO_DCE110_CLK_SRC(*clk_src));  	*clk_src = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index 36044cb8ec83..c5e200d09038 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -680,7 +680,7 @@ void hubbub21_wm_read_state(struct hubbub *hubbub,  			DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage);  } -void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +static void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub)  {  	struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);  	uint32_t prog_wm_value; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index 3de1bcf9b3d8..58e459c7e7d3 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -183,7 +183,7 @@ static void hubp21_setup(  } -void hubp21_set_viewport( +static void hubp21_set_viewport(  	struct hubp *hubp,  	const struct rect *viewport,  	const struct rect *viewport_c) @@ -225,8 +225,8 @@ void hubp21_set_viewport(  		  SEC_VIEWPORT_Y_START_C, viewport_c->y);  } -void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, -		struct vm_system_aperture_param *apt) +static void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, +						   struct vm_system_aperture_param *apt)  {  	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); @@ -248,7 +248,7 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,  			SYSTEM_ACCESS_MODE, 0x3);  } -void hubp21_validate_dml_output(struct hubp *hubp, +static void hubp21_validate_dml_output(struct hubp *hubp,  		struct dc_context *ctx,  		struct _vcs_dpi_display_rq_regs_st *dml_rq_regs,  		struct _vcs_dpi_display_dlg_regs_st *dml_dlg_attr, @@ -664,7 +664,8 @@ static void program_surface_flip_and_addr(struct hubp *hubp, struct surface_flip  			flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS);  } -void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_regs) +static void dmcub_PLAT_54186_wa(struct hubp *hubp, +				struct surface_flip_registers *flip_regs)  {  	struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;  	struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); @@ -697,7 +698,7 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_  	PERF_TRACE();  // TODO: remove after performance is stable.  } -bool hubp21_program_surface_flip_and_addr( +static bool hubp21_program_surface_flip_and_addr(  		struct hubp *hubp,  		const struct dc_plane_address *address,  		bool flip_immediate) @@ -805,7 +806,7 @@ bool hubp21_program_surface_flip_and_addr(  	return true;  } -void hubp21_init(struct hubp *hubp) +static void hubp21_init(struct hubp *hubp)  {  	// DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta  	// This is a chicken bit to enable the ECO fix. 
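
[Editor's note] Among the smaller fixes in this stretch, the dcn201_update_plane_addr hunk (dcn201_hwseq.c, above) is worth calling out: the local copy of plane_state->address was initialized in its declaration, dereferencing the pointer before the plane_state NULL check could run. A self-contained sketch of the bug and the corrected ordering, using hypothetical type and function names:

#include <stddef.h>

struct plane_state { unsigned long address; };	/* stand-in type */

/* Sketch of the corrected ordering in dcn201_update_plane_addr(). */
static long read_plane_addr(const struct plane_state *ps)
{
	unsigned long uma;	/* was: = ps->address;  dereference before the check */

	if (ps == NULL)
		return -1;	/* guard now runs before any dereference */

	uma = ps->address;	/* copy only after ps is known non-NULL */
	return (long)uma;
}

The same pattern appears earlier in this series for dmub_hpd_callback, where the drm_device pointer load was likewise moved below the adev NULL checks.
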
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 54c11ba550ae..b270f0b194dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -28,6 +28,8 @@  #include "dcn20/dcn20_hwseq.h"  #include "dcn21_hwseq.h" +#include "dcn21_init.h" +  static const struct hw_sequencer_funcs dcn21_funcs = {  	.program_gamut_remap = dcn10_program_gamut_remap,  	.init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c index aa46c35b05a2..0a1ba6e7081c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -203,7 +203,7 @@ static bool update_cfg_data(  	return true;  } -bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) +static bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)  {  	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);  	int value; @@ -277,7 +277,7 @@ void dcn21_link_encoder_enable_dp_output(  } -void dcn21_link_encoder_enable_dp_mst_output( +static void dcn21_link_encoder_enable_dp_mst_output(  	struct link_encoder *enc,  	const struct dc_link_settings *link_settings,  	enum clock_source_id clock_source) @@ -288,9 +288,8 @@ void dcn21_link_encoder_enable_dp_mst_output(  	dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);  } -void dcn21_link_encoder_disable_output( -	struct link_encoder *enc, -	enum signal_type signal) +static void dcn21_link_encoder_disable_output(struct link_encoder *enc, +					      enum signal_type signal)  {  	dcn10_link_encoder_disable_output(enc, signal); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index d452a0d1777e..e5cc6bf45743 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -784,9 +784,8 @@ static const struct dce_i2c_mask i2c_masks = {  		I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)  }; -struct dce_i2c_hw *dcn21_i2c_hw_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dce_i2c_hw *dcn21_i2c_hw_create(struct dc_context *ctx, +					      uint32_t inst)  {  	struct dce_i2c_hw *dce_i2c_hw =  		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -874,7 +873,7 @@ static const struct dc_debug_options debug_defaults_drv = {  		.clock_trace = true,  		.disable_pplib_clock_request = true,  		.min_disp_clk_khz = 100000, -		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, +		.pipe_split_policy = MPC_SPLIT_DYNAMIC,  		.force_single_disp_pipe_split = false,  		.disable_dcc = DCC_ENABLE,  		.vsr_support = true, @@ -1093,7 +1092,7 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s  	}  } -void dcn21_calculate_wm( +static void dcn21_calculate_wm(  		struct dc *dc, struct dc_state *context,  		display_e2e_pipe_params_st *pipes,  		int *out_pipe_cnt, @@ -1390,7 +1389,7 @@ validate_out:   * with DC_FP_START()/DC_FP_END(). Use the same approach as for   * dcn20_validate_bandwidth in dcn20_resource.c.   
*/ -bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, +static bool dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context,  		bool fast_validate)  {  	bool voltage_supported; @@ -1480,8 +1479,8 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)  	return &hubbub->base;  } -struct output_pixel_processor *dcn21_opp_create( -	struct dc_context *ctx, uint32_t inst) +static struct output_pixel_processor *dcn21_opp_create(struct dc_context *ctx, +						       uint32_t inst)  {  	struct dcn20_opp *opp =  		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); @@ -1496,9 +1495,8 @@ struct output_pixel_processor *dcn21_opp_create(  	return &opp->base;  } -struct timing_generator *dcn21_timing_generator_create( -		struct dc_context *ctx, -		uint32_t instance) +static struct timing_generator *dcn21_timing_generator_create(struct dc_context *ctx, +							      uint32_t instance)  {  	struct optc *tgn10 =  		kzalloc(sizeof(struct optc), GFP_KERNEL); @@ -1518,7 +1516,7 @@ struct timing_generator *dcn21_timing_generator_create(  	return &tgn10->base;  } -struct mpc *dcn21_mpc_create(struct dc_context *ctx) +static struct mpc *dcn21_mpc_create(struct dc_context *ctx)  {  	struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),  					  GFP_KERNEL); @@ -1545,8 +1543,8 @@ static void read_dce_straps(  } -struct display_stream_compressor *dcn21_dsc_create( -	struct dc_context *ctx, uint32_t inst) +static struct display_stream_compressor *dcn21_dsc_create(struct dc_context *ctx, +							  uint32_t inst)  {  	struct dcn20_dsc *dsc =  		kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL); @@ -1683,9 +1681,8 @@ static struct dc_cap_funcs cap_funcs = {  	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap  }; -struct stream_encoder *dcn21_stream_encoder_create( -	enum engine_id eng_id, -	struct dc_context *ctx) +static struct stream_encoder *dcn21_stream_encoder_create(enum engine_id eng_id, +							  struct dc_context *ctx)  {  	struct dcn10_stream_encoder *enc1 =  		kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); @@ -1917,7 +1914,7 @@ static int dcn21_populate_dml_pipes_from_context(  	return pipe_cnt;  } -enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state) +static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)  {  	enum dc_status result = DC_OK; @@ -2028,6 +2025,8 @@ static bool dcn21_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	dc->caps.hdmi_frl_pcon_support = true; +  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv;  	else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index ebd9c35c914f..8daa12730bc1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -50,22 +50,6 @@  	enc1->base.ctx -void convert_dc_info_packet_to_128( -	const struct dc_info_packet *info_packet, -	struct dc_info_packet_128 *info_packet_128) -{ -	unsigned int i; - -	info_packet_128->hb0 = info_packet->hb0; -	info_packet_128->hb1 = info_packet->hb1; -	info_packet_128->hb2 = info_packet->hb2; -	info_packet_128->hb3 = info_packet->hb3; - -	for (i = 0; i < 32; i++) { -		info_packet_128->sb[i] = info_packet->sb[i]; -	} - -}  static void enc3_update_hdmi_info_packet(  	struct 
dcn10_stream_encoder *enc1,  	uint32_t packet_index, @@ -489,7 +473,7 @@ static void enc3_dp_set_odm_combine(  }  /* setup stream encoder in dvi mode */ -void enc3_stream_encoder_dvi_set_stream_attribute( +static void enc3_stream_encoder_dvi_set_stream_attribute(  	struct stream_encoder *enc,  	struct dc_crtc_timing *crtc_timing,  	bool is_dual_link) @@ -805,6 +789,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {  		enc3_stream_encoder_update_dp_info_packets,  	.stop_dp_info_packets =  		enc1_stream_encoder_stop_dp_info_packets, +	.reset_fifo = +		enc1_stream_encoder_reset_fifo,  	.dp_blank =  		enc1_stream_encoder_dp_blank,  	.dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index c1d967ed6551..ab3918c0a15b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -41,8 +41,7 @@  	dpp->tf_shift->field_name, dpp->tf_mask->field_name -void dpp30_read_state(struct dpp *dpp_base, -		struct dcn_dpp_state *s) +static void dpp30_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s)  {  	struct dcn20_dpp *dpp = TO_DCN20_DPP(dpp_base); @@ -373,7 +372,7 @@ void dpp3_set_cursor_attributes(  } -bool dpp3_get_optimal_number_of_taps( +static bool dpp3_get_optimal_number_of_taps(  		struct dpp *dpp,  		struct scaler_data *scl_data,  		const struct scaling_taps *in_taps) @@ -474,22 +473,7 @@ bool dpp3_get_optimal_number_of_taps(  	return true;  } -void dpp3_cnv_set_bias_scale( -		struct dpp *dpp_base, -		struct  dc_bias_and_scale *bias_and_scale) -{ -	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); - -	REG_UPDATE(FCNV_FP_BIAS_R, FCNV_FP_BIAS_R, bias_and_scale->bias_red); -	REG_UPDATE(FCNV_FP_BIAS_G, FCNV_FP_BIAS_G, bias_and_scale->bias_green); -	REG_UPDATE(FCNV_FP_BIAS_B, FCNV_FP_BIAS_B, bias_and_scale->bias_blue); -	REG_UPDATE(FCNV_FP_SCALE_R, FCNV_FP_SCALE_R, bias_and_scale->scale_red); -	REG_UPDATE(FCNV_FP_SCALE_G, FCNV_FP_SCALE_G, bias_and_scale->scale_green); -	REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue); -} - -void dpp3_deferred_update( -	struct dpp *dpp_base) +static void dpp3_deferred_update(struct dpp *dpp_base)  {  	int bypass_state;  	struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base); @@ -751,8 +735,8 @@ static enum dc_lut_mode dpp3_get_blndgam_current(struct dpp *dpp_base)  		return mode;  } -bool dpp3_program_blnd_lut( -	struct dpp *dpp_base, const struct pwl_params *params) +static bool dpp3_program_blnd_lut(struct dpp *dpp_base, +				  const struct pwl_params *params)  {  	enum dc_lut_mode current_mode;  	enum dc_lut_mode next_mode; @@ -1164,9 +1148,8 @@ static void dpp3_program_shaper_lutb_settings(  } -bool dpp3_program_shaper( -		struct dpp *dpp_base, -		const struct pwl_params *params) +static bool dpp3_program_shaper(struct dpp *dpp_base, +				const struct pwl_params *params)  {  	enum dc_lut_mode current_mode;  	enum dc_lut_mode next_mode; @@ -1355,9 +1338,8 @@ static void dpp3_select_3dlut_ram_mask(  	REG_SET(CM_3DLUT_INDEX, 0, CM_3DLUT_INDEX, 0);  } -bool dpp3_program_3dlut( -		struct dpp *dpp_base, -		struct tetrahedral_params *params) +static bool dpp3_program_3dlut(struct dpp *dpp_base, +			       struct tetrahedral_params *params)  {  	enum dc_lut_mode mode;  	bool is_17x17x17; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c index eac08926b574..6a4dcafb9bba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c @@ -490,6 +490,7 @@ static struct hubp_funcs dcn30_hubp_funcs = {  	.hubp_setup_interdependent = hubp2_setup_interdependent,  	.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,  	.set_blank = hubp2_set_blank, +	.set_blank_regs = hubp2_set_blank_regs,  	.dcc_control = hubp3_dcc_control,  	.mem_program_viewport = min_set_viewport,  	.set_cursor_attributes	= hubp2_cursor_set_attributes, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index df2717116604..1db1ca19411d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -344,6 +344,17 @@ void dcn30_enable_writeback(  	dwb->funcs->enable(dwb, &wb_info->dwb_params);  } +void dcn30_prepare_bandwidth(struct dc *dc, + 	struct dc_state *context) +{ +	if (dc->clk_mgr->dc_mode_softmax_enabled) +		if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 && +				context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000) +			dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); + + 	dcn20_prepare_bandwidth(dc, context); +} +  void dcn30_disable_writeback(  		struct dc *dc,  		unsigned int dwb_pipe_inst) @@ -437,7 +448,7 @@ void dcn30_init_hw(struct dc *dc)  	struct dce_hwseq *hws = dc->hwseq;  	struct dc_bios *dcb = dc->ctx->dc_bios;  	struct resource_pool *res_pool = dc->res_pool; -	int i, j; +	int i;  	int edp_num;  	uint32_t backlight = MAX_BACKLIGHT_LEVEL; @@ -534,41 +545,8 @@ void dcn30_init_hw(struct dc *dc)  			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);  	/* we want to turn off all dp displays before doing detection */ -	if (dc->config.power_down_display_on_boot) { -		uint8_t dpcd_power_state = '\0'; -		enum dc_status status = DC_ERROR_UNEXPECTED; - -		for (i = 0; i < dc->link_count; i++) { -			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) -				continue; -			/* DP 2.0 states that LTTPR regs must be read first */ -			dp_retrieve_lttpr_cap(dc->links[i]); - -			/* if any of the displays are lit up turn them off */ -			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, -						     &dpcd_power_state, sizeof(dpcd_power_state)); -			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { -				/* blank dp stream before power off receiver*/ -				if (dc->links[i]->link_enc->funcs->get_dig_frontend) { -					unsigned int fe; - -					fe = dc->links[i]->link_enc->funcs->get_dig_frontend( -										dc->links[i]->link_enc); -					if (fe == ENGINE_ID_UNKNOWN) -						continue; - -					for (j = 0; j < dc->res_pool->stream_enc_count; j++) { -						if (fe == dc->res_pool->stream_enc[j]->id) { -							dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], -										dc->res_pool->stream_enc[j]); -							break; -						} -					} -				} -				dp_receiver_power_ctrl(dc->links[i], false); -			} -		} -	} +	if (dc->config.power_down_display_on_boot) +		dc_link_blank_all_dp_displays(dc);  	/* If taking control over from VBIOS, we may want to optimize our first  	 * mode set, so we need to skip powering down pipes until we know which diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h index e9a0005288d3..73e7b690e82c 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h @@ -27,7 +27,7 @@  #define __DC_HWSS_DCN30_H__  #include "hw_sequencer_private.h" - +#include "dcn20/dcn20_hwseq.h"  struct dc;  void dcn30_init_hw(struct dc *dc); @@ -47,6 +47,9 @@ void dcn30_disable_writeback(  		struct dc *dc,  		unsigned int dwb_pipe_inst); +void dcn30_prepare_bandwidth(struct dc *dc, + 	struct dc_state *context); +  bool dcn30_mmhubbub_warmup(  	struct dc *dc,  	unsigned int num_dwb, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 93f32a312fee..bb347319de83 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -29,6 +29,8 @@  #include "dcn21/dcn21_hwseq.h"  #include "dcn30_hwseq.h" +#include "dcn30_init.h" +  static const struct hw_sequencer_funcs dcn30_funcs = {  	.program_gamut_remap = dcn10_program_gamut_remap,  	.init_hw = dcn30_init_hw, @@ -53,6 +55,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {  	.enable_audio_stream = dce110_enable_audio_stream,  	.disable_audio_stream = dce110_disable_audio_stream,  	.disable_plane = dcn20_disable_plane, +	.disable_pixel_data = dcn20_disable_pixel_data,  	.pipe_control_lock = dcn20_pipe_control_lock,  	.interdependent_update_lock = dcn10_lock_all_pipes,  	.cursor_lock = dcn10_cursor_lock, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c index 1c4b171c68ad..7a93eff183d9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c @@ -100,7 +100,7 @@ static void mmhubbub3_warmup_mcif(struct mcif_wb *mcif_wb,  	REG_UPDATE(MMHUBBUB_WARMUP_CONTROL_STATUS, MMHUBBUB_WARMUP_EN, false);  } -void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb, +static void mmhubbub3_config_mcif_buf(struct mcif_wb *mcif_wb,  		struct mcif_buf_params *params,  		unsigned int dest_height)  { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c index 95149734378b..0ce0d6165f43 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c @@ -1362,7 +1362,7 @@ uint32_t mpcc3_acquire_rmu(struct mpc *mpc, int mpcc_id, int rmu_idx)  	return -1;  } -int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id) +static int mpcc3_release_rmu(struct mpc *mpc, int mpcc_id)  {  	struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);  	int rmu_idx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 5d9e6413d67a..f5e8916601d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -332,6 +332,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {  		.get_crc = optc1_get_crc,  		.configure_crc = optc2_configure_crc,  		.set_dsc_config = optc3_set_dsc_config, +		.get_dsc_status = optc2_get_dsc_status,  		.set_dwb_source = NULL,  		.set_odm_bypass = optc3_set_odm_bypass,  		.set_odm_combine = optc3_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 79a66e0c4303..602ec9a08549 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -816,7 +816,7 @@ static const struct 
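
The #include "dcn30_init.h" added to dcn30_init.c above, like the matching includes in the dcn301/dcn302/dcn303/dcn31 init files later in this patch, follows the usual kernel convention: a translation unit includes its own header so -Wmissing-prototypes can verify each extern definition against its declaration, and everything not declared there can safely become static, which is what most of this patch does. Schematically, with illustrative file names:

/* dcnxx_init.h: the one symbol other units are meant to call. */
struct dc;
void dcnxx_hw_sequencer_construct(struct dc *dc);

/* dcnxx_init.c starts with #include "dcnxx_init.h", then: */
static void helper_only_used_here(struct dc *dc)
{
	/* internal setup; invisible outside this file */
	(void)dc;
}

void dcnxx_hw_sequencer_construct(struct dc *dc)
{
	helper_only_used_here(dc);
}
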
dc_plane_cap plane_cap = {  			.argb8888 = true,  			.nv12 = true,  			.fp16 = true, -			.p010 = false, +			.p010 = true,  			.ayuv = false,  	}, @@ -840,7 +840,7 @@ static const struct dc_debug_options debug_defaults_drv = {  	.timing_trace = false,  	.clock_trace = true,  	.disable_pplib_clock_request = true, -	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, +	.pipe_split_policy = MPC_SPLIT_DYNAMIC,  	.force_single_disp_pipe_split = false,  	.disable_dcc = DCC_ENABLE,  	.vsr_support = true, @@ -875,7 +875,7 @@ static const struct dc_debug_options debug_defaults_diags = {  	.use_max_lb = true  }; -void dcn30_dpp_destroy(struct dpp **dpp) +static void dcn30_dpp_destroy(struct dpp **dpp)  {  	kfree(TO_DCN20_DPP(*dpp));  	*dpp = NULL; @@ -992,7 +992,7 @@ static struct mpc *dcn30_mpc_create(  	return &mpc30->base;  } -struct hubbub *dcn30_hubbub_create(struct dc_context *ctx) +static struct hubbub *dcn30_hubbub_create(struct dc_context *ctx)  {  	int i; @@ -1143,9 +1143,8 @@ static struct afmt *dcn30_afmt_create(  	return &afmt3->base;  } -struct stream_encoder *dcn30_stream_encoder_create( -	enum engine_id eng_id, -	struct dc_context *ctx) +static struct stream_encoder *dcn30_stream_encoder_create(enum engine_id eng_id, +							  struct dc_context *ctx)  {  	struct dcn10_stream_encoder *enc1;  	struct vpg *vpg; @@ -1179,8 +1178,7 @@ struct stream_encoder *dcn30_stream_encoder_create(  	return &enc1->base;  } -struct dce_hwseq *dcn30_hwseq_create( -	struct dc_context *ctx) +static struct dce_hwseq *dcn30_hwseq_create(struct dc_context *ctx)  {  	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); @@ -2639,6 +2637,8 @@ static bool dcn30_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	dc->caps.hdmi_frl_pcon_support = true; +  	/* read VBIOS LTTPR caps */  	{  		if (ctx->dc_bios->funcs->get_lttpr_caps) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c index e85b695f2351..3d42a1a337ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c @@ -30,6 +30,8 @@  #include "dcn30/dcn30_hwseq.h"  #include "dcn301_hwseq.h" +#include "dcn301_init.h" +  static const struct hw_sequencer_funcs dcn301_funcs = {  	.program_gamut_remap = dcn10_program_gamut_remap,  	.init_hw = dcn10_init_hw, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c index 736bda30abc3..ad0df1a72a90 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c @@ -93,7 +93,7 @@ static unsigned int dcn301_get_16_bit_backlight_from_pwm(struct panel_cntl *pane  	return (uint32_t)(current_backlight);  } -uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl) +static uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)  {  	struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);  	uint32_t value; @@ -147,7 +147,7 @@ uint32_t dcn301_panel_cntl_hw_init(struct panel_cntl *panel_cntl)  	return current_backlight;  } -void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl) +static void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)  {  	struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(*panel_cntl); @@ -155,7 +155,7 @@ void dcn301_panel_cntl_destroy(struct panel_cntl **panel_cntl)  	*panel_cntl = NULL;  } -bool 
dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl) +static bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)  {  	struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);  	uint32_t value; @@ -165,7 +165,7 @@ bool dcn301_is_panel_backlight_on(struct panel_cntl *panel_cntl)  	return value;  } -bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl) +static bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)  {  	struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl);  	uint32_t pwr_seq_state, dig_on, dig_on_ovrd; @@ -177,7 +177,7 @@ bool dcn301_is_panel_powered_on(struct panel_cntl *panel_cntl)  	return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);  } -void dcn301_store_backlight_level(struct panel_cntl *panel_cntl) +static void dcn301_store_backlight_level(struct panel_cntl *panel_cntl)  {  	struct dcn301_panel_cntl *dcn301_panel_cntl = TO_DCN301_PANEL_CNTL(panel_cntl); diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index fbaa03f26d8b..c1c6e602b06c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -656,7 +656,7 @@ static const struct dc_plane_cap plane_cap = {  			.argb8888 = true,  			.nv12 = true,  			.fp16 = true, -			.p010 = false, +			.p010 = true,  			.ayuv = false,  	}, @@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {  	.disable_clock_gate = true,  	.disable_pplib_clock_request = true,  	.disable_pplib_wm_range = true, -	.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, +	.pipe_split_policy = MPC_SPLIT_DYNAMIC,  	.force_single_disp_pipe_split = false,  	.disable_dcc = DCC_ENABLE,  	.vsr_support = true, @@ -717,15 +717,13 @@ static const struct dc_debug_options debug_defaults_diags = {  	.use_max_lb = false,  }; -void dcn301_dpp_destroy(struct dpp **dpp) +static void dcn301_dpp_destroy(struct dpp **dpp)  {  	kfree(TO_DCN20_DPP(*dpp));  	*dpp = NULL;  } -struct dpp *dcn301_dpp_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dpp *dcn301_dpp_create(struct dc_context *ctx, uint32_t inst)  {  	struct dcn3_dpp *dpp =  		kzalloc(sizeof(struct dcn3_dpp), GFP_KERNEL); @@ -741,8 +739,8 @@ struct dpp *dcn301_dpp_create(  	kfree(dpp);  	return NULL;  } -struct output_pixel_processor *dcn301_opp_create( -	struct dc_context *ctx, uint32_t inst) +static struct output_pixel_processor *dcn301_opp_create(struct dc_context *ctx, +							uint32_t inst)  {  	struct dcn20_opp *opp =  		kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL); @@ -757,9 +755,7 @@ struct output_pixel_processor *dcn301_opp_create(  	return &opp->base;  } -struct dce_aux *dcn301_aux_engine_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dce_aux *dcn301_aux_engine_create(struct dc_context *ctx, uint32_t inst)  {  	struct aux_engine_dce110 *aux_engine =  		kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL); @@ -793,9 +789,7 @@ static const struct dce_i2c_mask i2c_masks = {  		I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)  }; -struct dce_i2c_hw *dcn301_i2c_hw_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct dce_i2c_hw *dcn301_i2c_hw_create(struct dc_context *ctx, uint32_t inst)  {  	struct dce_i2c_hw *dce_i2c_hw =  		kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL); @@ -829,7 +823,7 @@ static struct mpc *dcn301_mpc_create(  	return &mpc30->base;  } -struct hubbub *dcn301_hubbub_create(struct dc_context 
*ctx) +static struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)  {  	int i; @@ -860,9 +854,8 @@ struct hubbub *dcn301_hubbub_create(struct dc_context *ctx)  	return &hubbub3->base;  } -struct timing_generator *dcn301_timing_generator_create( -		struct dc_context *ctx, -		uint32_t instance) +static struct timing_generator *dcn301_timing_generator_create( +	struct dc_context *ctx, uint32_t instance)  {  	struct optc *tgn10 =  		kzalloc(sizeof(struct optc), GFP_KERNEL); @@ -894,7 +887,7 @@ static const struct encoder_feature_support link_enc_feature = {  		.flags.bits.IS_TPS4_CAPABLE = true  }; -struct link_encoder *dcn301_link_encoder_create( +static struct link_encoder *dcn301_link_encoder_create(  	const struct encoder_init_data *enc_init_data)  {  	struct dcn20_link_encoder *enc20 = @@ -915,7 +908,7 @@ struct link_encoder *dcn301_link_encoder_create(  	return &enc20->enc10.base;  } -struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data) +static struct panel_cntl *dcn301_panel_cntl_create(const struct panel_cntl_init_data *init_data)  {  	struct dcn301_panel_cntl *panel_cntl =  		kzalloc(sizeof(struct dcn301_panel_cntl), GFP_KERNEL); @@ -997,9 +990,8 @@ static struct afmt *dcn301_afmt_create(  	return &afmt3->base;  } -struct stream_encoder *dcn301_stream_encoder_create( -	enum engine_id eng_id, -	struct dc_context *ctx) +static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id, +							   struct dc_context *ctx)  {  	struct dcn10_stream_encoder *enc1;  	struct vpg *vpg; @@ -1033,8 +1025,7 @@ struct stream_encoder *dcn301_stream_encoder_create(  	return &enc1->base;  } -struct dce_hwseq *dcn301_hwseq_create( -	struct dc_context *ctx) +static struct dce_hwseq *dcn301_hwseq_create(struct dc_context *ctx)  {  	struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_KERNEL); @@ -1182,9 +1173,7 @@ static void dcn301_destruct(struct dcn301_resource_pool *pool)  		dcn_dccg_destroy(&pool->base.dccg);  } -struct hubp *dcn301_hubp_create( -	struct dc_context *ctx, -	uint32_t inst) +static struct hubp *dcn301_hubp_create(struct dc_context *ctx, uint32_t inst)  {  	struct dcn20_hubp *hubp2 =  		kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL); @@ -1201,7 +1190,7 @@ struct hubp *dcn301_hubp_create(  	return NULL;  } -bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)  {  	int i;  	uint32_t pipe_count = pool->res_cap->num_dwb; @@ -1226,7 +1215,7 @@ bool dcn301_dwbc_create(struct dc_context *ctx, struct resource_pool *pool)  	return true;  } -bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) +static bool dcn301_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)  {  	int i;  	uint32_t pipe_count = pool->res_cap->num_dwb; @@ -1449,9 +1438,7 @@ static bool dcn301_resource_construct(  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true;  	dc->caps.extended_aux_timeout_support = true; -#ifdef CONFIG_DRM_AMD_DC_DMUB  	dc->caps.dmcub_support = true; -#endif  	/* Color pipeline capabilities */  	dc->caps.color.dpp.dcn_arch = 1; @@ -1487,6 +1474,23 @@ static bool dcn301_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	/* read VBIOS LTTPR caps */ +	if (ctx->dc_bios->funcs->get_lttpr_caps) { +		enum bp_result bp_query_result; +		uint8_t is_vbios_lttpr_enable = 0; + +		bp_query_result = 
ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); +		dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; +	} + +	if (ctx->dc_bios->funcs->get_lttpr_interop) { +		enum bp_result bp_query_result; +		uint8_t is_vbios_interop_enabled = 0; + +		bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); +		dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; +	} +  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv;  	else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c index d88b9011c502..eb375f30f5bc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c @@ -29,6 +29,8 @@  #include "dc.h" +#include "dcn302_init.h" +  void dcn302_hw_sequencer_construct(struct dc *dc)  {  	dcn30_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index fcf96cf08c76..2e9cbfa7663b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -211,7 +211,7 @@ static const struct dc_debug_options debug_defaults_drv = {  		.timing_trace = false,  		.clock_trace = true,  		.disable_pplib_clock_request = true, -		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, +		.pipe_split_policy = MPC_SPLIT_DYNAMIC,  		.force_single_disp_pipe_split = false,  		.disable_dcc = DCC_ENABLE,  		.vsr_support = true, @@ -276,7 +276,7 @@ static const struct dc_plane_cap plane_cap = {  				.argb8888 = true,  				.nv12 = true,  				.fp16 = true, -				.p010 = false, +				.p010 = true,  				.ayuv = false,  		},  		.max_upscale_factor = { @@ -1557,6 +1557,24 @@ static bool dcn302_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	/* read VBIOS LTTPR caps */ +	if (ctx->dc_bios->funcs->get_lttpr_caps) { +		enum bp_result bp_query_result; +		uint8_t is_vbios_lttpr_enable = 0; + +		bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); +		dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; +	} + +	if (ctx->dc_bios->funcs->get_lttpr_interop) { +		enum bp_result bp_query_result; +		uint8_t is_vbios_interop_enabled = 0; + +		bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, +				&is_vbios_interop_enabled); +		dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; +	} +  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv;  	else diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h index a79c54bbc899..294bd757bcb5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_dccg.h @@ -15,7 +15,11 @@  	SR(DPPCLK_DTO_CTRL),\  	DCCG_SRII(DTO_PARAM, DPPCLK, 0),\  	DCCG_SRII(DTO_PARAM, DPPCLK, 1),\ -	SR(REFCLK_CNTL) +	SR(REFCLK_CNTL),\ +	SR(DISPCLK_FREQ_CHANGE_CNTL),\ +	DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\ +	DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1) +  #define DCCG_MASK_SH_LIST_DCN3_03(mask_sh) \  		DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\ @@ -25,6 +29,18 @@  		
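
This VBIOS LTTPR block is repeated nearly verbatim for dcn301, dcn302 and dcn303 in this patch. Each copy probes an optional BIOS callback and trusts the returned byte only when the query reports BP_RESULT_OK. The shared shape, condensed into a sketch with stubbed types:

#include <stdbool.h>
#include <stdint.h>

enum bp_result { BP_RESULT_OK, BP_RESULT_FAILURE };

struct dc_bios {
	/* Optional callbacks; NULL means the VBIOS has no answer. */
	enum bp_result (*get_lttpr_caps)(struct dc_bios *bios, uint8_t *flag);
	enum bp_result (*get_lttpr_interop)(struct dc_bios *bios, uint8_t *flag);
};

static bool query_vbios_flag(struct dc_bios *bios,
			     enum bp_result (*query)(struct dc_bios *, uint8_t *))
{
	uint8_t raw = 0;

	if (!query)
		return false;

	/* Normalize any nonzero byte to true, but only on success. */
	return query(bios, &raw) == BP_RESULT_OK && !!raw;
}

With a helper like this, each resource_construct would reduce to two assignments, e.g. caps.vbios_lttpr_enable = query_vbios_flag(bios, bios->get_lttpr_caps).
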
DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_PHASE, mask_sh),\  		DCCG_SF(DPPCLK0_DTO_PARAM, DPPCLK0_DTO_MODULO, mask_sh),\  		DCCG_SF(REFCLK_CNTL, REFCLK_CLOCK_EN, mask_sh),\ -		DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh) +		DCCG_SF(REFCLK_CNTL, REFCLK_SRC_SEL, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_DELAY, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_STEP_SIZE, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_MAX_ERRDET_CYCLES, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_RESET, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_STATE, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DCCG_FIFO_ERRDET_OVR_EN, mask_sh),\ +		DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_CHG_FWD_CORR_DISABLE, mask_sh),\ +		DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 0, mask_sh),\ +		DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, ADD_PIXEL, 1, mask_sh),\ +		DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 0, mask_sh),\ +		DCCG_SFII(OTG, PIXEL_RATE_CNTL, OTG, DROP_PIXEL, 1, mask_sh)  #endif //__DCN303_DCCG_H__ diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c index aa5dbbade2bd..f499f8ab5e47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c @@ -9,6 +9,8 @@  #include "dcn30/dcn30_init.h"  #include "dc.h" +#include "dcn303_init.h" +  void dcn303_hw_sequencer_construct(struct dc *dc)  {  	dcn30_hw_sequencer_construct(dc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 4a9b64023675..2de687f64cf6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -193,7 +193,7 @@ static const struct dc_debug_options debug_defaults_drv = {  		.timing_trace = false,  		.clock_trace = true,  		.disable_pplib_clock_request = true, -		.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, +		.pipe_split_policy = MPC_SPLIT_DYNAMIC,  		.force_single_disp_pipe_split = false,  		.disable_dcc = DCC_ENABLE,  		.vsr_support = true, @@ -254,7 +254,7 @@ static const struct dc_plane_cap plane_cap = {  				.argb8888 = true,  				.nv12 = true,  				.fp16 = true, -				.p010 = false, +				.p010 = true,  				.ayuv = false,  		},  		.max_upscale_factor = { @@ -1500,6 +1500,23 @@ static bool dcn303_resource_construct(  	dc->caps.color.mpc.ogam_rom_caps.hlg = 0;  	dc->caps.color.mpc.ocsc = 1; +	/* read VBIOS LTTPR caps */ +	if (ctx->dc_bios->funcs->get_lttpr_caps) { +		enum bp_result bp_query_result; +		uint8_t is_vbios_lttpr_enable = 0; + +		bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); +		dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; +	} + +	if (ctx->dc_bios->funcs->get_lttpr_interop) { +		enum bp_result bp_query_result; +		uint8_t is_vbios_interop_enabled = 0; + +		bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); +		dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; +	} +  	if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)  		dc->debug = debug_defaults_drv;  	else diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 815481a3ef54..ea4f8e06b07c 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -462,7 +462,7 @@ void dccg31_set_physymclk(  }  /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ -void dccg31_set_dtbclk_dto( +static void dccg31_set_dtbclk_dto(  		struct dccg *dccg,  		int dtbclk_inst,  		int req_dtbclk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index ee6f13bef377..8b9b1a5309ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -67,6 +67,68 @@  #define MIN(X, Y) ((X) < (Y) ? (X) : (Y))  #endif +static uint8_t phy_id_from_transmitter(enum transmitter t) +{ +	uint8_t phy_id; + +	switch (t) { +	case TRANSMITTER_UNIPHY_A: +		phy_id = 0; +		break; +	case TRANSMITTER_UNIPHY_B: +		phy_id = 1; +		break; +	case TRANSMITTER_UNIPHY_C: +		phy_id = 2; +		break; +	case TRANSMITTER_UNIPHY_D: +		phy_id = 3; +		break; +	case TRANSMITTER_UNIPHY_E: +		phy_id = 4; +		break; +	case TRANSMITTER_UNIPHY_F: +		phy_id = 5; +		break; +	case TRANSMITTER_UNIPHY_G: +		phy_id = 6; +		break; +	default: +		phy_id = 0; +		break; +	} +	return phy_id; +} + +static bool has_query_dp_alt(struct link_encoder *enc) +{ +	struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + +	/* Supports development firmware and firmware >= 4.0.11 */ +	return dc_dmub_srv && +	       !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) && +		 dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10)); +} + +static bool query_dp_alt_from_dmub(struct link_encoder *enc, +				   union dmub_rb_cmd *cmd) +{ +	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); +	struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv; + +	memset(cmd, 0, sizeof(*cmd)); +	cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS; +	cmd->query_dp_alt.header.sub_type = +		DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT; +	cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data); +	cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter); + +	if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd)) +		return false; + +	return true; +} +  void dcn31_link_encoder_set_dio_phy_mux(  	struct link_encoder *enc,  	enum encoder_type_select sel, @@ -141,7 +203,7 @@ void dcn31_link_encoder_set_dio_phy_mux(  	}  } -void enc31_hw_init(struct link_encoder *enc) +static void enc31_hw_init(struct link_encoder *enc)  {  	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); @@ -536,57 +598,90 @@ void dcn31_link_encoder_disable_output(  bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)  {  	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); +	union dmub_rb_cmd cmd;  	uint32_t dp_alt_mode_disable; -	bool is_usb_c_alt_mode = false; -	if (enc->features.flags.bits.DP_IS_USB_C) { -		if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { -			// [Note] no need to check hw_internal_rev once phy mux selection is ready -			REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); -		} else { +	/* Only applicable to USB-C PHY. */ +	if (!enc->features.flags.bits.DP_IS_USB_C) +		return false; + +	/* +	 * Use the new interface from DMCUB if available. +	 * Avoids hanging the RDCPSPIPE if DMCUB wasn't already running. 
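
has_query_dp_alt() above treats the DMUB transmitter query as usable on any firmware except releases 4.0.0 through 4.0.10; development firmware reports a version of 0 and so passes the check. A sketch of the range test follows; the version packing shown is an assumption for illustration, the real DMUB_FW_VERSION macro lives in the DMUB headers:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical packing: major.minor.revision into one 32-bit word. */
#define FW_VERSION(major, minor, rev) \
	((uint32_t)(((uint32_t)(major) << 24) | ((uint32_t)(minor) << 16) | (rev)))

static bool fw_supports_query_dp_alt(uint32_t fw_version)
{
	/* Everything outside 4.0.0 .. 4.0.10 is assumed capable. */
	return !(fw_version >= FW_VERSION(4, 0, 0) &&
		 fw_version <= FW_VERSION(4, 0, 10));
}
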
+	 */ +	if (has_query_dp_alt(enc)) { +		if (!query_dp_alt_from_dmub(enc, &cmd)) +			return false; + +		return (cmd.query_dp_alt.data.is_dp_alt_disable == 0); +	} + +	/* Legacy path, avoid if possible. */ +	if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { +		REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, +			&dp_alt_mode_disable); +	} else {  		/*  		 * B0 phys use a new set of registers to check whether alt mode is disabled.  		 * if value == 1 alt mode is disabled, otherwise it is enabled.  		 */ -			if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) -					|| (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) -					|| (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { -				REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); -			} else { -			// [Note] need to change TRANSMITTER_UNIPHY_C/D to F/G once phy mux selection is ready -				REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable); -			} +		if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || +		    (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || +		    (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { +			REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, +				&dp_alt_mode_disable); +		} else { +			REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, +				&dp_alt_mode_disable);  		} - -		is_usb_c_alt_mode = (dp_alt_mode_disable == 0);  	} -	return is_usb_c_alt_mode; +	return (dp_alt_mode_disable == 0);  } -void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, -										 struct dc_link_settings *link_settings) +void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings)  {  	struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); +	union dmub_rb_cmd cmd;  	uint32_t is_in_usb_c_dp4_mode = 0;  	dcn10_link_encoder_get_max_link_cap(enc, link_settings); -	/* in usb c dp2 mode, max lane count is 2 */ -	if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) { -		if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { -			// [Note] no need to check hw_internal_rev once phy mux selection is ready -			REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); +	/* Take the link cap directly if not USB */ +	if (!enc->features.flags.bits.DP_IS_USB_C) +		return; + +	/* +	 * Use the new interface from DMCUB if available. +	 * Avoids hanging the RDCPSPIPE if DMCUB wasn't already running. +	 */ +	if (has_query_dp_alt(enc)) { +		if (!query_dp_alt_from_dmub(enc, &cmd)) +			return; + +		if (cmd.query_dp_alt.data.is_usb && +		    cmd.query_dp_alt.data.is_dp4 == 0) +			link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count); + +		return; +	} + +	/* Legacy path, avoid if possible. 
*/ +	if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) { +		REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, +			&is_in_usb_c_dp4_mode); +	} else { +		if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) || +		    (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) || +		    (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { +			REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, +				&is_in_usb_c_dp4_mode);  		} else { -			if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) -					|| (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) -					|| (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) { -				REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); -			} else { -				REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode); -			} +			REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, +				&is_in_usb_c_dp4_mode);  		} -		if (!is_in_usb_c_dp4_mode) -			link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);  	} + +	if (!is_in_usb_c_dp4_mode) +		link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c index 6c08e21bb708..80dfaa4d4d81 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c @@ -499,7 +499,8 @@ static enum bp_result link_transmitter_control(  void dcn31_hpo_dp_link_enc_enable_dp_output(  	struct hpo_dp_link_encoder *enc,  	const struct dc_link_settings *link_settings, -	enum transmitter transmitter) +	enum transmitter transmitter, +	enum hpd_source_id hpd_source)  {  	struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);  	struct bp_transmitter_control cntl = { 0 }; @@ -508,6 +509,9 @@ void dcn31_hpo_dp_link_enc_enable_dp_output(  	/* Set the transmitter */  	enc3->base.transmitter = transmitter; +	/* Set the hpd source */ +	enc3->base.hpd_source = hpd_source; +  	/* Enable the PHY */  	cntl.action = TRANSMITTER_CONTROL_ENABLE;  	cntl.engine_id = ENGINE_ID_UNKNOWN; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h index 0706ccaf6fec..e324e9b83136 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h @@ -184,7 +184,8 @@ void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,  void dcn31_hpo_dp_link_enc_enable_dp_output(  	struct hpo_dp_link_encoder *enc,  	const struct dc_link_settings *link_settings, -	enum transmitter transmitter); +	enum transmitter transmitter, +	enum hpd_source_id hpd_source);  void dcn31_hpo_dp_link_enc_disable_output(  	struct hpo_dp_link_encoder *enc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 565f12dd179a..5065904c7833 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -358,8 +358,8 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(  	h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right;  	v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; -	hsp = 
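
The hsp/vsp assignments being corrected here invert the earlier mapping: in the MSA packet, bit 7 of these bytes flags negative sync polarity, so a positive-polarity timing must encode as 0 and a negative one as 0x80, not the other way around. As a tiny standalone sketch of the corrected encoding:

#include <stdbool.h>
#include <stdint.h>

/* 0x00 = positive (active-high) sync, 0x80 = negative, matching the fix. */
static uint8_t msa_sync_polarity(bool positive_polarity)
{
	return positive_polarity ? 0x00 : 0x80;
}
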
hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0; -	vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0; +	hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80; +	vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0 : 0x80;  	v_freq = hw_crtc_timing.pix_clk_100hz * 100;  	/*   MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 5dd1ce9ddb53..4206ce5bf9a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -112,7 +112,7 @@ void dcn31_init_hw(struct dc *dc)  	struct dc_bios *dcb = dc->ctx->dc_bios;  	struct resource_pool *res_pool = dc->res_pool;  	uint32_t backlight = MAX_BACKLIGHT_LEVEL; -	int i, j; +	int i;  	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)  		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -192,50 +192,13 @@ void dcn31_init_hw(struct dc *dc)  			link->link_status.link_active = true;  	} -	/* Power gate DSCs */ -	for (i = 0; i < res_pool->res_cap->num_dsc; i++) -		if (hws->funcs.dsc_pg_control != NULL) -			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); -  	/* Enables outbox notifications for usb4 dpia */  	if (dc->res_pool->usb4_dpia_count)  		dmub_enable_outbox_notification(dc);  	/* we want to turn off all dp displays before doing detection */ -	if (dc->config.power_down_display_on_boot) { -		uint8_t dpcd_power_state = '\0'; -		enum dc_status status = DC_ERROR_UNEXPECTED; - -		for (i = 0; i < dc->link_count; i++) { -			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) -				continue; - -			/* if any of the displays are lit up turn them off */ -			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, -						     &dpcd_power_state, sizeof(dpcd_power_state)); -			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { -				/* blank dp stream before power off receiver*/ -				if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY && -						dc->links[i]->link_enc->funcs->get_dig_frontend) { -					unsigned int fe; - -					fe = dc->links[i]->link_enc->funcs->get_dig_frontend( -										dc->links[i]->link_enc); -					if (fe == ENGINE_ID_UNKNOWN) -						continue; - -					for (j = 0; j < dc->res_pool->stream_enc_count; j++) { -						if (fe == dc->res_pool->stream_enc[j]->id) { -							dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], -										dc->res_pool->stream_enc[j]); -							break; -						} -					} -				} -				dp_receiver_power_ctrl(dc->links[i], false); -			} -		} -	} +	if (dc->config.power_down_display_on_boot) +		dc_link_blank_all_dp_displays(dc);  	/* If taking control over from VBIOS, we may want to optimize our first  	 * mode set, so we need to skip powering down pipes until we know which @@ -602,7 +565,7 @@ void dcn31_reset_hw_ctx_wrap(  			dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);  			if (hws->funcs.enable_stream_gating) -				hws->funcs.enable_stream_gating(dc, pipe_ctx); +				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);  			if (old_clk)  				old_clk->funcs->cs_power_down(old_clk);  		} diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 05335a8c3c2d..d7559e5a99ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -31,6 +31,8 @@  #include "dcn301/dcn301_hwseq.h"  #include "dcn31/dcn31_hwseq.h" +#include 
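
Both this dcn31_init_hw() hunk and the dcn30_init_hw() hunk earlier in the patch replace an open-coded power-down loop with dc_link_blank_all_dp_displays(). Going by the deleted code, the shared helper needs to do roughly the following; everything in this sketch is an illustrative stub, not the helper's actual implementation:

#include <stdbool.h>
#include <stdint.h>

/* Stubs standing in for struct dc / struct dc_link and their DPCD I/O. */
struct link { bool is_dp; };
struct dc_sk { int link_count; struct link *links[32]; };

static bool sink_powered_d0(struct link *l) { (void)l; return true; }
static void blank_stream(struct link *l) { (void)l; }  /* via DIG frontend */
static void power_down_receiver(struct link *l) { (void)l; }

static void blank_all_dp_displays(struct dc_sk *dc)
{
	int i;

	for (i = 0; i < dc->link_count; i++) {
		struct link *link = dc->links[i];

		if (!link->is_dp)
			continue;

		/* Only touch sinks that report DPCD power state D0. */
		if (!sink_powered_d0(link))
			continue;

		blank_stream(link);        /* blank before cutting power */
		power_down_receiver(link);
	}
}
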
"dcn31_init.h" +  static const struct hw_sequencer_funcs dcn31_funcs = {  	.program_gamut_remap = dcn10_program_gamut_remap,  	.init_hw = dcn31_init_hw, @@ -101,6 +103,8 @@ static const struct hw_sequencer_funcs dcn31_funcs = {  	.z10_restore = dcn31_z10_restore,  	.z10_save_init = dcn31_z10_save_init,  	.set_disp_pattern_generator = dcn30_set_disp_pattern_generator, +	.optimize_pwr_state = dcn21_optimize_pwr_state, +	.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,  	.update_visual_confirm_color = dcn20_update_visual_confirm_color,  }; @@ -149,4 +153,9 @@ void dcn31_hw_sequencer_construct(struct dc *dc)  		dc->hwss.init_hw = dcn20_fpga_init_hw;  		dc->hwseq->funcs.init_pipes = NULL;  	} +	if (dc->debug.disable_z10) { +		/*hw not support z10 or sw disable it*/ +		dc->hwss.z10_restore = NULL; +		dc->hwss.z10_save_init = NULL; +	}  } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c index a4b1d98f0007..e8562fa11366 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c @@ -256,6 +256,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {  		.get_crc = optc1_get_crc,  		.configure_crc = optc2_configure_crc,  		.set_dsc_config = optc3_set_dsc_config, +		.get_dsc_status = optc2_get_dsc_status,  		.set_dwb_source = NULL,  		.set_odm_bypass = optc3_set_odm_bypass,  		.set_odm_combine = optc31_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c index 3b3721386571..83ece02380a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c @@ -65,7 +65,7 @@ static uint32_t dcn31_get_16_bit_backlight_from_pwm(struct panel_cntl *panel_cnt  	return cmd.panel_cntl.data.current_backlight;  } -uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl) +static uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)  {  	struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(panel_cntl);  	struct dc_dmub_srv *dc_dmub_srv = panel_cntl->ctx->dmub_srv; @@ -96,7 +96,7 @@ uint32_t dcn31_panel_cntl_hw_init(struct panel_cntl *panel_cntl)  	return cmd.panel_cntl.data.current_backlight;  } -void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl) +static void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)  {  	struct dcn31_panel_cntl *dcn31_panel_cntl = TO_DCN31_PANEL_CNTL(*panel_cntl); @@ -104,7 +104,7 @@ void dcn31_panel_cntl_destroy(struct panel_cntl **panel_cntl)  	*panel_cntl = NULL;  } -bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl) +static bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)  {  	union dmub_rb_cmd cmd; @@ -114,7 +114,7 @@ bool dcn31_is_panel_backlight_on(struct panel_cntl *panel_cntl)  	return cmd.panel_cntl.data.is_backlight_on;  } -bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl) +static bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)  {  	union dmub_rb_cmd cmd; @@ -124,7 +124,7 @@ bool dcn31_is_panel_powered_on(struct panel_cntl *panel_cntl)  	return cmd.panel_cntl.data.is_powered_on;  } -void dcn31_store_backlight_level(struct panel_cntl *panel_cntl) +static void dcn31_store_backlight_level(struct panel_cntl *panel_cntl)  {  	union dmub_rb_cmd cmd; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 
18896294ae12..42ed47e8133d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -355,6 +355,14 @@ static const struct dce110_clk_src_regs clk_src_regs[] = {  	clk_src_regs(3, D),  	clk_src_regs(4, E)  }; +/*pll_id being rempped in dmub, in driver it is logical instance*/ +static const struct dce110_clk_src_regs clk_src_regs_b0[] = { +	clk_src_regs(0, A), +	clk_src_regs(1, B), +	clk_src_regs(2, F), +	clk_src_regs(3, G), +	clk_src_regs(4, E) +};  static const struct dce110_clk_src_shift cs_shift = {  		CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT) @@ -485,7 +493,8 @@ static const struct dcn31_apg_mask apg_mask = {  	SE_DCN3_REG_LIST(id)\  } -static const struct dcn10_stream_enc_registers stream_enc_regs[] = { +/* Some encoders won't be initialized here - but they're logical, not physical. */ +static const struct dcn10_stream_enc_registers stream_enc_regs[ENGINE_ID_COUNT] = {  	stream_enc_regs(0),  	stream_enc_regs(1),  	stream_enc_regs(2), @@ -968,7 +977,7 @@ static const struct dc_plane_cap plane_cap = {  			.argb8888 = true,  			.nv12 = true,  			.fp16 = true, -			.p010 = false, +			.p010 = true,  			.ayuv = false,  	}, @@ -994,7 +1003,7 @@ static const struct dc_debug_options debug_defaults_drv = {  	.timing_trace = false,  	.clock_trace = true,  	.disable_pplib_clock_request = false, -	.pipe_split_policy = MPC_SPLIT_AVOID, +	.pipe_split_policy = MPC_SPLIT_DYNAMIC,  	.force_single_disp_pipe_split = false,  	.disable_dcc = DCC_ENABLE,  	.vsr_support = true, @@ -1023,6 +1032,7 @@ static const struct dc_debug_options debug_defaults_drv = {  	},  	.optimize_edp_link_rate = true,  	.enable_sw_cntl_psr = true, +	.apply_vendor_specific_lttpr_wa = true,  };  static const struct dc_debug_options debug_defaults_diags = { @@ -1270,7 +1280,7 @@ static struct link_encoder *dcn31_link_enc_create_minimal(  	return &enc20->enc10.base;  } -struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data) +static struct panel_cntl *dcn31_panel_cntl_create(const struct panel_cntl_init_data *init_data)  {  	struct dcn31_panel_cntl *panel_cntl =  		kzalloc(sizeof(struct dcn31_panel_cntl), GFP_KERNEL); @@ -1774,6 +1784,7 @@ static int dcn31_populate_dml_pipes_from_context(  	int i, pipe_cnt;  	struct resource_context *res_ctx = &context->res_ctx;  	struct pipe_ctx *pipe; +	bool upscaled = false;  	dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); @@ -1785,6 +1796,11 @@ static int dcn31_populate_dml_pipes_from_context(  		pipe = &res_ctx->pipe_ctx[i];  		timing = &pipe->stream->timing; +		if (pipe->plane_state && +				(pipe->plane_state->src_rect.height < pipe->plane_state->dst_rect.height || +				pipe->plane_state->src_rect.width < pipe->plane_state->dst_rect.width)) +			upscaled = true; +  		/*  		 * Immediate flip can be set dynamically after enabling the plane.  		 
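
The upscaled flag introduced into dcn31_populate_dml_pipes_from_context() above is a per-plane test: a plane counts as upscaled when its destination rectangle exceeds its source rectangle in either dimension. Restated as a standalone predicate with a stub rect type:

#include <stdbool.h>

struct rect { int width, height; };

static bool plane_is_upscaled(const struct rect *src, const struct rect *dst)
{
	return src->height < dst->height || src->width < dst->width;
}
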
* We need to require support for immediate flip or underflow can be @@ -1829,6 +1845,11 @@ static int dcn31_populate_dml_pipes_from_context(  			context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;  			pipes[0].pipe.src.unbounded_req_mode = true;  		} +	} else if (context->stream_count >= dc->debug.crb_alloc_policy_min_disp_count +			&& dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) { +		context->bw_ctx.dml.ip.det_buffer_size_kbytes = dc->debug.crb_alloc_policy * 64; +	} else if (context->stream_count >= 3 && upscaled) { +		context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;  	}  	return pipe_cnt; @@ -1963,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp(  		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);  		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); -		if (dc->config.forced_clocks) { +		if (dc->config.forced_clocks || dc->debug.max_disp_clk) {  			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;  			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;  		} @@ -2199,6 +2220,8 @@ static bool dcn31_resource_construct(  	dc->caps.post_blend_color_processing = true;  	dc->caps.force_dp_tps4_for_cp2520 = true;  	dc->caps.dp_hpo = true; +	dc->caps.hdmi_frl_pcon_support = true; +	dc->caps.edp_dsc_support = true;  	dc->caps.extended_aux_timeout_support = true;  	dc->caps.dmcub_support = true;  	dc->caps.is_apu = true; @@ -2276,14 +2299,27 @@ static bool dcn31_resource_construct(  			dcn30_clock_source_create(ctx, ctx->dc_bios,  				CLOCK_SOURCE_COMBO_PHY_PLL1,  				&clk_src_regs[1], false); -	pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = +	/*move phypllx_pixclk_resync to dmub next*/ +	if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { +		pool->base.clock_sources[DCN31_CLK_SRC_PLL2] = +			dcn30_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_COMBO_PHY_PLL2, +				&clk_src_regs_b0[2], false); +		pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = +			dcn30_clock_source_create(ctx, ctx->dc_bios, +				CLOCK_SOURCE_COMBO_PHY_PLL3, +				&clk_src_regs_b0[3], false); +	} else { +		pool->base.clock_sources[DCN31_CLK_SRC_PLL2] =  			dcn30_clock_source_create(ctx, ctx->dc_bios,  				CLOCK_SOURCE_COMBO_PHY_PLL2,  				&clk_src_regs[2], false); -	pool->base.clock_sources[DCN31_CLK_SRC_PLL3] = +		pool->base.clock_sources[DCN31_CLK_SRC_PLL3] =  			dcn30_clock_source_create(ctx, ctx->dc_bios,  				CLOCK_SOURCE_COMBO_PHY_PLL3,  				&clk_src_regs[3], false); +	} +  	pool->base.clock_sources[DCN31_CLK_SRC_PLL4] =  			dcn30_clock_source_create(ctx, ctx->dc_bios,  				CLOCK_SOURCE_COMBO_PHY_PLL4, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h index 416fe7a721d8..a513363b3326 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h @@ -49,4 +49,35 @@ struct resource_pool *dcn31_create_resource_pool(  		const struct dc_init_data *init_data,  		struct dc *dc); +/*temp: B0 specific before switch to dcn313 headers*/ +#ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL +#define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e +#define regPHYPLLF_PIXCLK_RESYNC_CNTL_BASE_IDX 1 +#define regPHYPLLG_PIXCLK_RESYNC_CNTL 0x005f +#define regPHYPLLG_PIXCLK_RESYNC_CNTL_BASE_IDX 1 + +//PHYPLLF_PIXCLK_RESYNC_CNTL +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define 
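
The DET sizing branches just added pick the buffer size in priority order: an explicit CRB allocation policy (expressed in 64 KB units) once enough displays are active, then a fixed 192 KB when three or more streams are up and at least one plane is upscaled, otherwise the default. A condensed sketch; the real guard compares crb_alloc_policy against DET_SIZE_DEFAULT, simplified to > 0 here:

#include <stdbool.h>

static int det_buffer_kbytes(int stream_count, bool upscaled,
			     int crb_policy_64kb, int crb_min_disp_count,
			     int default_kbytes)
{
	if (crb_policy_64kb > 0 && stream_count >= crb_min_disp_count)
		return crb_policy_64kb * 64;   /* policy counts 64 KB segments */

	if (stream_count >= 3 && upscaled)
		return 192;

	return default_kbytes;
}
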
PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLF_PIXCLK_RESYNC_CNTL__PHYPLLF_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L + +//PHYPLLG_PIXCLK_RESYNC_CNTL +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE__SHIFT 0x0 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS__SHIFT 0x1 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE__SHIFT 0x8 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9 +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DEEP_COLOR_DTO_ENABLE_STATUS_MASK 0x00000002L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_ENABLE_MASK 0x00000100L +#define PHYPLLG_PIXCLK_RESYNC_CNTL__PHYPLLG_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L +#endif  #endif /* _DCN31_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h index 511f9e1159c7..4229369c57f4 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -34,12 +34,12 @@ struct cp_psp_stream_config {  	uint8_t dig_fe;  	uint8_t link_enc_idx;  	uint8_t stream_enc_idx; -	uint8_t phy_idx;  	uint8_t dio_output_idx; -	uint8_t dio_output_type; +	uint8_t phy_idx;  	uint8_t assr_enabled;  	uint8_t mst_enabled;  	uint8_t dp2_enabled; +	uint8_t usb4_enabled;  	void *dm_stream_ctx;  	bool dpms_off;  }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 0fe66b080a03..7f94e3f70d7f 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -59,7 +59,7 @@ void dm_helpers_free_gpu_mem(  		void *pvMem);  enum dc_edid_status dm_helpers_parse_edid_caps( -	struct dc_context *ctx, +	struct dc_link *link,  	const struct dc_edid *edid,  	struct dc_edid_caps *edid_caps); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 46c433c0bcb0..8bc27de4c104 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1711,14 +1711,6 @@ void dml21_rq_dlg_get_dlg_reg(  	dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);  } -void dml_rq_dlg_get_arb_params(struct display_mode_lib *mode_lib, display_arb_params_st *arb_param) -{ -	memset(arb_param, 0, sizeof(*arb_param)); -	arb_param->max_req_outstanding = 256; -	arb_param->min_req_outstanding = 68; -	arb_param->sat_level_us = 60; -} -  static void calculate_ttu_cursor(  		struct display_mode_lib *mode_lib,  		double *refcyc_per_req_delivery_pre_cur, diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 7e937bdcea00..6feb23432f8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -422,62 +422,8 @@ static void CalculateUrgentBurstFactor(  static void UseMinimumDCFCLK(  		struct display_mode_lib *mode_lib, -		int MaxInterDCNTileRepeaters,  		int MaxPrefetchMode, -		double FinalDRAMClockChangeLatency, -		double SREnterPlusExitTime, -		int ReturnBusWidth, -		int RoundTripPingLatencyCycles, -		int ReorderingBytes, -		int PixelChunkSizeInKByte, -		int MetaChunkSize, -		bool GPUVMEnable, -		int GPUVMMaxPageTableLevels, -		bool HostVMEnable, -		int NumberOfActivePlanes, -		double HostVMMinPageSize, -		int HostVMMaxNonCachedPageTableLevels, -		bool DynamicMetadataVMEnabled, -		enum immediate_flip_requirement ImmediateFlipRequirement, -		bool ProgressiveToInterlaceUnitInOPP, -		double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, -		double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, -		int VTotal[], -		int VActive[], -		int DynamicMetadataTransmittedBytes[], -		int DynamicMetadataLinesBeforeActiveRequired[], -		bool Interlace[], -		double RequiredDPPCLK[][2][DC__NUM_DPP__MAX], -		double RequiredDISPCLK[][2], -		double UrgLatency[], -		unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX], -		double ProjectedDCFCLKDeepSleep[][2], -		double MaximumVStartup[][2][DC__NUM_DPP__MAX], -		double TotalVActivePixelBandwidth[][2], -		double TotalVActiveCursorBandwidth[][2], -		double TotalMetaRowBandwidth[][2], -		double TotalDPTERowBandwidth[][2], -		unsigned int TotalNumberOfActiveDPP[][2], -		unsigned int TotalNumberOfDCCActiveDPP[][2], -		int dpte_group_bytes[], -		double PrefetchLinesY[][2][DC__NUM_DPP__MAX], -		double PrefetchLinesC[][2][DC__NUM_DPP__MAX], -		int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX], -		int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX], -		int BytePerPixelY[], -		int BytePerPixelC[], -		int HTotal[], -		double PixelClock[], -		double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX], -		double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX], -		double MetaRowBytes[][2][DC__NUM_DPP__MAX], -		bool DynamicMetadataEnable[], -		double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX], -		double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX], -		double ReadBandwidthLuma[], -		double ReadBandwidthChroma[], -		double DCFCLKPerState[], -		double DCFCLKState[][2]); +		int ReorderingBytes);  static void CalculatePixelDeliveryTimes(  		unsigned int NumberOfActivePlanes, @@ -3949,6 +3895,102 @@ static double TruncToValidBPP(  	return BPP_INVALID;  } +static noinline void CalculatePrefetchSchedulePerPlane( +		struct display_mode_lib *mode_lib, +		double HostVMInefficiencyFactor, +		int i, +		unsigned j, +		unsigned k) +{ +	struct vba_vars_st *v = &mode_lib->vba; +	Pipe myPipe; + +	myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k]; +	myPipe.DISPCLK = v->RequiredDISPCLK[i][j]; +	myPipe.PixelClock = v->PixelClock[k]; +	myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; +	myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; +	myPipe.ScalerEnabled = v->ScalerEnabled[k]; +	myPipe.SourceScan = v->SourceScan[k]; +	myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; +	myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; +	myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k]; +	myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k]; 
+	myPipe.InterlaceEnable = v->Interlace[k]; +	myPipe.NumberOfCursors = v->NumberOfCursors[k]; +	myPipe.VBlank = v->VTotal[k] - v->VActive[k]; +	myPipe.HTotal = v->HTotal[k]; +	myPipe.DCCEnable = v->DCCEnable[k]; +	myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 +		|| v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1; +	myPipe.SourcePixelFormat = v->SourcePixelFormat[k]; +	myPipe.BytePerPixelY = v->BytePerPixelY[k]; +	myPipe.BytePerPixelC = v->BytePerPixelC[k]; +	myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP; +	v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule( +		mode_lib, +		HostVMInefficiencyFactor, +		&myPipe, +		v->DSCDelayPerState[i][k], +		v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater, +		v->DPPCLKDelaySCL, +		v->DPPCLKDelaySCLLBOnly, +		v->DPPCLKDelayCNVCCursor, +		v->DISPCLKDelaySubtotal, +		v->SwathWidthYThisState[k] / v->HRatio[k], +		v->OutputFormat[k], +		v->MaxInterDCNTileRepeaters, +		dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]), +		v->MaximumVStartup[i][j][k], +		v->GPUVMMaxPageTableLevels, +		v->GPUVMEnable, +		v->HostVMEnable, +		v->HostVMMaxNonCachedPageTableLevels, +		v->HostVMMinPageSize, +		v->DynamicMetadataEnable[k], +		v->DynamicMetadataVMEnabled, +		v->DynamicMetadataLinesBeforeActiveRequired[k], +		v->DynamicMetadataTransmittedBytes[k], +		v->UrgLatency[i], +		v->ExtraLatency, +		v->TimeCalc, +		v->PDEAndMetaPTEBytesPerFrame[i][j][k], +		v->MetaRowBytes[i][j][k], +		v->DPTEBytesPerRow[i][j][k], +		v->PrefetchLinesY[i][j][k], +		v->SwathWidthYThisState[k], +		v->PrefillY[k], +		v->MaxNumSwY[k], +		v->PrefetchLinesC[i][j][k], +		v->SwathWidthCThisState[k], +		v->PrefillC[k], +		v->MaxNumSwC[k], +		v->swath_width_luma_ub_this_state[k], +		v->swath_width_chroma_ub_this_state[k], +		v->SwathHeightYThisState[k], +		v->SwathHeightCThisState[k], +		v->TWait, +		&v->DSTXAfterScaler[k], +		&v->DSTYAfterScaler[k], +		&v->LineTimesForPrefetch[k], +		&v->PrefetchBW[k], +		&v->LinesForMetaPTE[k], +		&v->LinesForMetaAndDPTERow[k], +		&v->VRatioPreY[i][j][k], +		&v->VRatioPreC[i][j][k], +		&v->RequiredPrefetchPixelDataBWLuma[i][j][k], +		&v->RequiredPrefetchPixelDataBWChroma[i][j][k], +		&v->NoTimeForDynamicMetadata[i][j][k], +		&v->Tno_bw[k], +		&v->prefetch_vmrow_bw[k], +		&v->dummy7[k], +		&v->dummy8[k], +		&v->dummy13[k], +		&v->VUpdateOffsetPix[k], +		&v->VUpdateWidthPix[k], +		&v->VReadyOffsetPix[k]); +} +  void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)  {  	struct vba_vars_st *v = &mode_lib->vba; @@ -5079,66 +5121,8 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  		}  	} -	if (v->UseMinimumRequiredDCFCLK == true) { -		UseMinimumDCFCLK( -				mode_lib, -				v->MaxInterDCNTileRepeaters, -				MaxPrefetchMode, -				v->DRAMClockChangeLatency, -				v->SREnterPlusExitTime, -				v->ReturnBusWidth, -				v->RoundTripPingLatencyCycles, -				ReorderingBytes, -				v->PixelChunkSizeInKByte, -				v->MetaChunkSize, -				v->GPUVMEnable, -				v->GPUVMMaxPageTableLevels, -				v->HostVMEnable, -				v->NumberOfActivePlanes, -				v->HostVMMinPageSize, -				v->HostVMMaxNonCachedPageTableLevels, -				v->DynamicMetadataVMEnabled, -				v->ImmediateFlipRequirement[0], -				v->ProgressiveToInterlaceUnitInOPP, -				v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, -				v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, -				v->VTotal, -				v->VActive, -				
v->DynamicMetadataTransmittedBytes, -				v->DynamicMetadataLinesBeforeActiveRequired, -				v->Interlace, -				v->RequiredDPPCLK, -				v->RequiredDISPCLK, -				v->UrgLatency, -				v->NoOfDPP, -				v->ProjectedDCFCLKDeepSleep, -				v->MaximumVStartup, -				v->TotalVActivePixelBandwidth, -				v->TotalVActiveCursorBandwidth, -				v->TotalMetaRowBandwidth, -				v->TotalDPTERowBandwidth, -				v->TotalNumberOfActiveDPP, -				v->TotalNumberOfDCCActiveDPP, -				v->dpte_group_bytes, -				v->PrefetchLinesY, -				v->PrefetchLinesC, -				v->swath_width_luma_ub_all_states, -				v->swath_width_chroma_ub_all_states, -				v->BytePerPixelY, -				v->BytePerPixelC, -				v->HTotal, -				v->PixelClock, -				v->PDEAndMetaPTEBytesPerFrame, -				v->DPTEBytesPerRow, -				v->MetaRowBytes, -				v->DynamicMetadataEnable, -				v->VActivePixelBandwidth, -				v->VActiveCursorBandwidth, -				v->ReadBandwidthLuma, -				v->ReadBandwidthChroma, -				v->DCFCLKPerState, -				v->DCFCLKState); -	} +	if (v->UseMinimumRequiredDCFCLK == true) +		UseMinimumDCFCLK(mode_lib, MaxPrefetchMode, ReorderingBytes);  	for (i = 0; i < v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) { @@ -5276,92 +5260,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l  						v->SREnterPlusExitTime);  				for (k = 0; k < v->NumberOfActivePlanes; k++) { -					Pipe myPipe; - -					myPipe.DPPCLK = v->RequiredDPPCLK[i][j][k]; -					myPipe.DISPCLK = v->RequiredDISPCLK[i][j]; -					myPipe.PixelClock = v->PixelClock[k]; -					myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; -					myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; -					myPipe.ScalerEnabled = v->ScalerEnabled[k]; -					myPipe.SourceScan = v->SourceScan[k]; -					myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; -					myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; -					myPipe.BlockWidth256BytesC = v->Read256BlockWidthC[k]; -					myPipe.BlockHeight256BytesC = v->Read256BlockHeightC[k]; -					myPipe.InterlaceEnable = v->Interlace[k]; -					myPipe.NumberOfCursors = v->NumberOfCursors[k]; -					myPipe.VBlank = v->VTotal[k] - v->VActive[k]; -					myPipe.HTotal = v->HTotal[k]; -					myPipe.DCCEnable = v->DCCEnable[k]; -					myPipe.ODMCombineIsEnabled = v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1 -							|| v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_2to1; -					myPipe.SourcePixelFormat = v->SourcePixelFormat[k]; -					myPipe.BytePerPixelY = v->BytePerPixelY[k]; -					myPipe.BytePerPixelC = v->BytePerPixelC[k]; -					myPipe.ProgressiveToInterlaceUnitInOPP = v->ProgressiveToInterlaceUnitInOPP; -					v->NoTimeForPrefetch[i][j][k] = CalculatePrefetchSchedule( -							mode_lib, -							HostVMInefficiencyFactor, -							&myPipe, -							v->DSCDelayPerState[i][k], -							v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater, -							v->DPPCLKDelaySCL, -							v->DPPCLKDelaySCLLBOnly, -							v->DPPCLKDelayCNVCCursor, -							v->DISPCLKDelaySubtotal, -							v->SwathWidthYThisState[k] / v->HRatio[k], -							v->OutputFormat[k], -							v->MaxInterDCNTileRepeaters, -							dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]), -							v->MaximumVStartup[i][j][k], -							v->GPUVMMaxPageTableLevels, -							v->GPUVMEnable, -							v->HostVMEnable, -							v->HostVMMaxNonCachedPageTableLevels, -							v->HostVMMinPageSize, -							v->DynamicMetadataEnable[k], -							v->DynamicMetadataVMEnabled, -							v->DynamicMetadataLinesBeforeActiveRequired[k], -							v->DynamicMetadataTransmittedBytes[k], -							v->UrgLatency[i], -							
v->ExtraLatency, -							v->TimeCalc, -							v->PDEAndMetaPTEBytesPerFrame[i][j][k], -							v->MetaRowBytes[i][j][k], -							v->DPTEBytesPerRow[i][j][k], -							v->PrefetchLinesY[i][j][k], -							v->SwathWidthYThisState[k], -							v->PrefillY[k], -							v->MaxNumSwY[k], -							v->PrefetchLinesC[i][j][k], -							v->SwathWidthCThisState[k], -							v->PrefillC[k], -							v->MaxNumSwC[k], -							v->swath_width_luma_ub_this_state[k], -							v->swath_width_chroma_ub_this_state[k], -							v->SwathHeightYThisState[k], -							v->SwathHeightCThisState[k], -							v->TWait, -							&v->DSTXAfterScaler[k], -							&v->DSTYAfterScaler[k], -							&v->LineTimesForPrefetch[k], -							&v->PrefetchBW[k], -							&v->LinesForMetaPTE[k], -							&v->LinesForMetaAndDPTERow[k], -							&v->VRatioPreY[i][j][k], -							&v->VRatioPreC[i][j][k], -							&v->RequiredPrefetchPixelDataBWLuma[i][j][k], -							&v->RequiredPrefetchPixelDataBWChroma[i][j][k], -							&v->NoTimeForDynamicMetadata[i][j][k], -							&v->Tno_bw[k], -							&v->prefetch_vmrow_bw[k], -							&v->dummy7[k], -							&v->dummy8[k], -							&v->dummy13[k], -							&v->VUpdateOffsetPix[k], -							&v->VUpdateWidthPix[k], -							&v->VReadyOffsetPix[k]); +					CalculatePrefetchSchedulePerPlane(mode_lib, +									  HostVMInefficiencyFactor, +									  i, j,	k);  				}  				for (k = 0; k < v->NumberOfActivePlanes; k++) { @@ -7249,69 +7150,15 @@ static double CalculateUrgentLatency(  static void UseMinimumDCFCLK(  		struct display_mode_lib *mode_lib, -		int MaxInterDCNTileRepeaters,  		int MaxPrefetchMode, -		double FinalDRAMClockChangeLatency, -		double SREnterPlusExitTime, -		int ReturnBusWidth, -		int RoundTripPingLatencyCycles, -		int ReorderingBytes, -		int PixelChunkSizeInKByte, -		int MetaChunkSize, -		bool GPUVMEnable, -		int GPUVMMaxPageTableLevels, -		bool HostVMEnable, -		int NumberOfActivePlanes, -		double HostVMMinPageSize, -		int HostVMMaxNonCachedPageTableLevels, -		bool DynamicMetadataVMEnabled, -		enum immediate_flip_requirement ImmediateFlipRequirement, -		bool ProgressiveToInterlaceUnitInOPP, -		double MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation, -		double PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency, -		int VTotal[], -		int VActive[], -		int DynamicMetadataTransmittedBytes[], -		int DynamicMetadataLinesBeforeActiveRequired[], -		bool Interlace[], -		double RequiredDPPCLK[][2][DC__NUM_DPP__MAX], -		double RequiredDISPCLK[][2], -		double UrgLatency[], -		unsigned int NoOfDPP[][2][DC__NUM_DPP__MAX], -		double ProjectedDCFCLKDeepSleep[][2], -		double MaximumVStartup[][2][DC__NUM_DPP__MAX], -		double TotalVActivePixelBandwidth[][2], -		double TotalVActiveCursorBandwidth[][2], -		double TotalMetaRowBandwidth[][2], -		double TotalDPTERowBandwidth[][2], -		unsigned int TotalNumberOfActiveDPP[][2], -		unsigned int TotalNumberOfDCCActiveDPP[][2], -		int dpte_group_bytes[], -		double PrefetchLinesY[][2][DC__NUM_DPP__MAX], -		double PrefetchLinesC[][2][DC__NUM_DPP__MAX], -		int swath_width_luma_ub_all_states[][2][DC__NUM_DPP__MAX], -		int swath_width_chroma_ub_all_states[][2][DC__NUM_DPP__MAX], -		int BytePerPixelY[], -		int BytePerPixelC[], -		int HTotal[], -		double PixelClock[], -		double PDEAndMetaPTEBytesPerFrame[][2][DC__NUM_DPP__MAX], -		double DPTEBytesPerRow[][2][DC__NUM_DPP__MAX], -		double MetaRowBytes[][2][DC__NUM_DPP__MAX], -		bool DynamicMetadataEnable[], -		double VActivePixelBandwidth[][2][DC__NUM_DPP__MAX], -		double VActiveCursorBandwidth[][2][DC__NUM_DPP__MAX], 
-		double ReadBandwidthLuma[], -		double ReadBandwidthChroma[], -		double DCFCLKPerState[], -		double DCFCLKState[][2]) +		int ReorderingBytes)  {  	struct vba_vars_st *v = &mode_lib->vba;  	int dummy1, i, j, k;  	double NormalEfficiency,  dummy2, dummy3;  	double TotalMaxPrefetchFlipDPTERowBandwidth[DC__VOLTAGE_STATES][2]; -	NormalEfficiency = PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0; +	NormalEfficiency = v->PercentOfIdealFabricAndSDPPortBWReceivedAfterUrgLatency / 100.0;  	for (i = 0; i < v->soc.num_states; ++i) {  		for (j = 0; j <= 1; ++j) {  			double PixelDCFCLKCyclesRequiredInPrefetch[DC__NUM_DPP__MAX]; @@ -7329,61 +7176,61 @@ static void UseMinimumDCFCLK(  			double MinimumTvmPlus2Tr0;  			TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = 0; -			for (k = 0; k < NumberOfActivePlanes; ++k) { +			for (k = 0; k < v->NumberOfActivePlanes; ++k) {  				TotalMaxPrefetchFlipDPTERowBandwidth[i][j] = TotalMaxPrefetchFlipDPTERowBandwidth[i][j] -						+ NoOfDPP[i][j][k] * DPTEBytesPerRow[i][j][k] / (15.75 * HTotal[k] / PixelClock[k]); +						+ v->NoOfDPP[i][j][k] * v->DPTEBytesPerRow[i][j][k] / (15.75 * v->HTotal[k] / v->PixelClock[k]);  			} -			for (k = 0; k <= NumberOfActivePlanes - 1; ++k) { -				NoOfDPPState[k] = NoOfDPP[i][j][k]; +			for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) { +				NoOfDPPState[k] = v->NoOfDPP[i][j][k];  			} -			MinimumTWait = CalculateTWait(MaxPrefetchMode, FinalDRAMClockChangeLatency, UrgLatency[i], SREnterPlusExitTime); -			NonDPTEBandwidth = TotalVActivePixelBandwidth[i][j] + TotalVActiveCursorBandwidth[i][j] + TotalMetaRowBandwidth[i][j]; -			DPTEBandwidth = (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) ? -					TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : TotalDPTERowBandwidth[i][j]; +			MinimumTWait = CalculateTWait(MaxPrefetchMode, v->FinalDRAMClockChangeLatency, v->UrgLatency[i], v->SREnterPlusExitTime); +			NonDPTEBandwidth = v->TotalVActivePixelBandwidth[i][j] + v->TotalVActiveCursorBandwidth[i][j] + v->TotalMetaRowBandwidth[i][j]; +			DPTEBandwidth = (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) ? 
+					TotalMaxPrefetchFlipDPTERowBandwidth[i][j] : v->TotalDPTERowBandwidth[i][j];  			DCFCLKRequiredForAverageBandwidth = dml_max3( -					ProjectedDCFCLKDeepSleep[i][j], -					(NonDPTEBandwidth + TotalDPTERowBandwidth[i][j]) / ReturnBusWidth -							/ (MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100), -					(NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / ReturnBusWidth); +					v->ProjectedDCFCLKDeepSleep[i][j], +					(NonDPTEBandwidth + v->TotalDPTERowBandwidth[i][j]) / v->ReturnBusWidth +							/ (v->MaxAveragePercentOfIdealFabricAndSDPPortBWDisplayCanUseInNormalSystemOperation / 100), +					(NonDPTEBandwidth + DPTEBandwidth / NormalEfficiency) / NormalEfficiency / v->ReturnBusWidth);  			ExtraLatencyBytes = CalculateExtraLatencyBytes(  					ReorderingBytes, -					TotalNumberOfActiveDPP[i][j], -					PixelChunkSizeInKByte, -					TotalNumberOfDCCActiveDPP[i][j], -					MetaChunkSize, -					GPUVMEnable, -					HostVMEnable, -					NumberOfActivePlanes, +					v->TotalNumberOfActiveDPP[i][j], +					v->PixelChunkSizeInKByte, +					v->TotalNumberOfDCCActiveDPP[i][j], +					v->MetaChunkSize, +					v->GPUVMEnable, +					v->HostVMEnable, +					v->NumberOfActivePlanes,  					NoOfDPPState, -					dpte_group_bytes, +					v->dpte_group_bytes,  					1, -					HostVMMinPageSize, -					HostVMMaxNonCachedPageTableLevels); -			ExtraLatencyCycles = RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / ReturnBusWidth; -			for (k = 0; k < NumberOfActivePlanes; ++k) { +					v->HostVMMinPageSize, +					v->HostVMMaxNonCachedPageTableLevels); +			ExtraLatencyCycles = v->RoundTripPingLatencyCycles + __DML_ARB_TO_RET_DELAY__ + ExtraLatencyBytes / NormalEfficiency / v->ReturnBusWidth; +			for (k = 0; k < v->NumberOfActivePlanes; ++k) {  				double DCFCLKCyclesRequiredInPrefetch;  				double ExpectedPrefetchBWAcceleration;  				double PrefetchTime; -				PixelDCFCLKCyclesRequiredInPrefetch[k] = (PrefetchLinesY[i][j][k] * swath_width_luma_ub_all_states[i][j][k] * BytePerPixelY[k] -						+ PrefetchLinesC[i][j][k] * swath_width_chroma_ub_all_states[i][j][k] * BytePerPixelC[k]) / NormalEfficiency / ReturnBusWidth; +				PixelDCFCLKCyclesRequiredInPrefetch[k] = (v->PrefetchLinesY[i][j][k] * v->swath_width_luma_ub_all_states[i][j][k] * v->BytePerPixelY[k] +						+ v->PrefetchLinesC[i][j][k] * v->swath_width_chroma_ub_all_states[i][j][k] * v->BytePerPixelC[k]) / NormalEfficiency / v->ReturnBusWidth;  				DCFCLKCyclesRequiredInPrefetch = 2 * ExtraLatencyCycles / NoOfDPPState[k] -						+ PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth * (GPUVMMaxPageTableLevels > 2 ? 1 : 0) -						+ 2 * DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / ReturnBusWidth -						+ 2 * MetaRowBytes[i][j][k] / NormalEfficiency / ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k]; -				PrefetchPixelLinesTime[k] = dml_max(PrefetchLinesY[i][j][k], PrefetchLinesC[i][j][k]) * HTotal[k] / PixelClock[k]; -				ExpectedPrefetchBWAcceleration = (VActivePixelBandwidth[i][j][k] + VActiveCursorBandwidth[i][j][k]) -						/ (ReadBandwidthLuma[k] + ReadBandwidthChroma[k]); +						+ v->PDEAndMetaPTEBytesPerFrame[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth * (v->GPUVMMaxPageTableLevels > 2 ? 
1 : 0) +						+ 2 * v->DPTEBytesPerRow[i][j][k] / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth +						+ 2 * v->MetaRowBytes[i][j][k] / NormalEfficiency / v->ReturnBusWidth + PixelDCFCLKCyclesRequiredInPrefetch[k]; +				PrefetchPixelLinesTime[k] = dml_max(v->PrefetchLinesY[i][j][k], v->PrefetchLinesC[i][j][k]) * v->HTotal[k] / v->PixelClock[k]; +				ExpectedPrefetchBWAcceleration = (v->VActivePixelBandwidth[i][j][k] + v->VActiveCursorBandwidth[i][j][k]) +						/ (v->ReadBandwidthLuma[k] + v->ReadBandwidthChroma[k]);  				DynamicMetadataVMExtraLatency[k] = -						(GPUVMEnable == true && DynamicMetadataEnable[k] == true && DynamicMetadataVMEnabled == true) ? -								UrgLatency[i] * GPUVMMaxPageTableLevels * (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0; -				PrefetchTime = (MaximumVStartup[i][j][k] - 1) * HTotal[k] / PixelClock[k] - MinimumTWait -						- UrgLatency[i] -								* ((GPUVMMaxPageTableLevels <= 2 ? GPUVMMaxPageTableLevels : GPUVMMaxPageTableLevels - 2) -										* (HostVMEnable == true ? HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1) +						(v->GPUVMEnable == true && v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true) ? +								v->UrgLatency[i] * v->GPUVMMaxPageTableLevels * (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) : 0; +				PrefetchTime = (v->MaximumVStartup[i][j][k] - 1) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait +						- v->UrgLatency[i] +								* ((v->GPUVMMaxPageTableLevels <= 2 ? v->GPUVMMaxPageTableLevels : v->GPUVMMaxPageTableLevels - 2) +										* (v->HostVMEnable == true ? v->HostVMMaxNonCachedPageTableLevels + 1 : 1) - 1)  						- DynamicMetadataVMExtraLatency[k];  				if (PrefetchTime > 0) { @@ -7392,14 +7239,14 @@ static void UseMinimumDCFCLK(  							/ (PrefetchTime * PixelDCFCLKCyclesRequiredInPrefetch[k] / DCFCLKCyclesRequiredInPrefetch);  					DCFCLKRequiredForPeakBandwidthPerPlane[k] = NoOfDPPState[k] * PixelDCFCLKCyclesRequiredInPrefetch[k] / PrefetchPixelLinesTime[k]  							* dml_max(1.0, ExpectedVRatioPrefetch) * dml_max(1.0, ExpectedVRatioPrefetch / 4) * ExpectedPrefetchBWAcceleration; -					if (HostVMEnable == true || ImmediateFlipRequirement == dm_immediate_flip_required) { +					if (v->HostVMEnable == true || v->ImmediateFlipRequirement[0] == dm_immediate_flip_required) {  						DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKRequiredForPeakBandwidthPerPlane[k] -								+ NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / ReturnBusWidth; +								+ NoOfDPPState[k] * DPTEBandwidth / NormalEfficiency / NormalEfficiency / v->ReturnBusWidth;  					}  				} else { -					DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i]; +					DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];  				} -				if (DynamicMetadataEnable[k] == true) { +				if (v->DynamicMetadataEnable[k] == true) {  					double TSetupPipe;  					double TdmbfPipe;  					double TdmsksPipe; @@ -7407,17 +7254,17 @@ static void UseMinimumDCFCLK(  					double AllowedTimeForUrgentExtraLatency;  					CalculateVupdateAndDynamicMetadataParameters( -							MaxInterDCNTileRepeaters, -							RequiredDPPCLK[i][j][k], -							RequiredDISPCLK[i][j], -							ProjectedDCFCLKDeepSleep[i][j], -							PixelClock[k], -							HTotal[k], -							VTotal[k] - VActive[k], -							DynamicMetadataTransmittedBytes[k], -							DynamicMetadataLinesBeforeActiveRequired[k], -							Interlace[k], -							ProgressiveToInterlaceUnitInOPP, +							v->MaxInterDCNTileRepeaters, +							
v->RequiredDPPCLK[i][j][k], +							v->RequiredDISPCLK[i][j], +							v->ProjectedDCFCLKDeepSleep[i][j], +							v->PixelClock[k], +							v->HTotal[k], +							v->VTotal[k] - v->VActive[k], +							v->DynamicMetadataTransmittedBytes[k], +							v->DynamicMetadataLinesBeforeActiveRequired[k], +							v->Interlace[k], +							v->ProgressiveToInterlaceUnitInOPP,  							&TSetupPipe,  							&TdmbfPipe,  							&TdmecPipe, @@ -7425,31 +7272,31 @@ static void UseMinimumDCFCLK(  							&dummy1,  							&dummy2,  							&dummy3); -					AllowedTimeForUrgentExtraLatency = MaximumVStartup[i][j][k] * HTotal[k] / PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe +					AllowedTimeForUrgentExtraLatency = v->MaximumVStartup[i][j][k] * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - TSetupPipe - TdmbfPipe - TdmecPipe  							- TdmsksPipe - DynamicMetadataVMExtraLatency[k];  					if (AllowedTimeForUrgentExtraLatency > 0) {  						DCFCLKRequiredForPeakBandwidthPerPlane[k] = dml_max(  								DCFCLKRequiredForPeakBandwidthPerPlane[k],  								ExtraLatencyCycles / AllowedTimeForUrgentExtraLatency);  					} else { -						DCFCLKRequiredForPeakBandwidthPerPlane[k] = DCFCLKPerState[i]; +						DCFCLKRequiredForPeakBandwidthPerPlane[k] = v->DCFCLKPerState[i];  					}  				}  			}  			DCFCLKRequiredForPeakBandwidth = 0; -			for (k = 0; k <= NumberOfActivePlanes - 1; ++k) { +			for (k = 0; k <= v->NumberOfActivePlanes - 1; ++k) {  				DCFCLKRequiredForPeakBandwidth = DCFCLKRequiredForPeakBandwidth + DCFCLKRequiredForPeakBandwidthPerPlane[k];  			} -			MinimumTvmPlus2Tr0 = UrgLatency[i] -					* (GPUVMEnable == true ? -							(HostVMEnable == true ? -									(GPUVMMaxPageTableLevels + 2) * (HostVMMaxNonCachedPageTableLevels + 1) - 1 : GPUVMMaxPageTableLevels + 1) : +			MinimumTvmPlus2Tr0 = v->UrgLatency[i] +					* (v->GPUVMEnable == true ? +							(v->HostVMEnable == true ? 
+									(v->GPUVMMaxPageTableLevels + 2) * (v->HostVMMaxNonCachedPageTableLevels + 1) - 1 : v->GPUVMMaxPageTableLevels + 1) :  							0); -			for (k = 0; k < NumberOfActivePlanes; ++k) { +			for (k = 0; k < v->NumberOfActivePlanes; ++k) {  				double MaximumTvmPlus2Tr0PlusTsw; -				MaximumTvmPlus2Tr0PlusTsw = (MaximumVStartup[i][j][k] - 2) * HTotal[k] / PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k]; +				MaximumTvmPlus2Tr0PlusTsw = (v->MaximumVStartup[i][j][k] - 2) * v->HTotal[k] / v->PixelClock[k] - MinimumTWait - DynamicMetadataVMExtraLatency[k];  				if (MaximumTvmPlus2Tr0PlusTsw <= MinimumTvmPlus2Tr0 + PrefetchPixelLinesTime[k] / 4) { -					DCFCLKRequiredForPeakBandwidth = DCFCLKPerState[i]; +					DCFCLKRequiredForPeakBandwidth = v->DCFCLKPerState[i];  				} else {  					DCFCLKRequiredForPeakBandwidth = dml_max3(  							DCFCLKRequiredForPeakBandwidth, @@ -7457,7 +7304,7 @@ static void UseMinimumDCFCLK(  							(2 * ExtraLatencyCycles + PixelDCFCLKCyclesRequiredInPrefetch[k]) / (MaximumTvmPlus2Tr0PlusTsw - MinimumTvmPlus2Tr0));  				}  			} -			DCFCLKState[i][j] = dml_min(DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth)); +			v->DCFCLKState[i][j] = dml_min(v->DCFCLKPerState[i], 1.05 * dml_max(DCFCLKRequiredForAverageBandwidth, DCFCLKRequiredForPeakBandwidth));  		}  	}  } diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 6905ef1e75a6..d76251fd1566 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -73,6 +73,7 @@ struct display_mode_lib {  	struct vba_vars_st vba;  	struct dal_logger *logger;  	struct dml_funcs funcs; +	struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];  };  void dml_init_instance(struct display_mode_lib *lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c new file mode 100644 index 000000000000..789f7562cdc7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -0,0 +1,1889 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dml_wrapper.h" +#include "resource.h" +#include "core_types.h" +#include "dsc.h" +#include "clk_mgr.h" + +#ifndef DC_LOGGER_INIT +#define DC_LOGGER_INIT +#undef DC_LOG_WARNING +#define DC_LOG_WARNING +#endif + +#define DML_WRAPPER_TRANSLATION_ +#include "dml_wrapper_translation.c" +#undef DML_WRAPPER_TRANSLATION_ + +static bool is_dual_plane(enum surface_pixel_format format) +{ +	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static void build_clamping_params(struct dc_stream_state *stream) +{ +	stream->clamping.clamping_level = CLAMPING_FULL_RANGE; +	stream->clamping.c_depth = stream->timing.display_color_depth; +	stream->clamping.pixel_encoding = stream->timing.pixel_encoding; +} + +static void get_pixel_clock_parameters( +	const struct pipe_ctx *pipe_ctx, +	struct pixel_clk_params *pixel_clk_params) +{ +	const struct dc_stream_state *stream = pipe_ctx->stream; + +	/*TODO: is this halved for YCbCr 420? in that case we might want to move +	 * the pixel clock normalization for hdmi up to here instead of doing it +	 * in pll_adjust_pix_clk +	 */ +	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; +	pixel_clk_params->encoder_object_id = stream->link->link_enc->id; +	pixel_clk_params->signal_type = pipe_ctx->stream->signal; +	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; +	/* TODO: un-hardcode*/ +	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * +						LINK_RATE_REF_FREQ_IN_KHZ; +	pixel_clk_params->flags.ENABLE_SS = 0; +	pixel_clk_params->color_depth = +		stream->timing.display_color_depth; +	pixel_clk_params->flags.DISPLAY_BLANKED = 1; +	pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == +			PIXEL_ENCODING_YCBCR420); +	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; +	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { +		pixel_clk_params->color_depth = COLOR_DEPTH_888; +	} +	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { +		pixel_clk_params->requested_pix_clk_100hz  = pixel_clk_params->requested_pix_clk_100hz / 2; +	} +	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) +		pixel_clk_params->requested_pix_clk_100hz *= 2; + +} + +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ +	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); + +	if (pipe_ctx->clock_source) +		pipe_ctx->clock_source->funcs->get_pix_clk_dividers( +			pipe_ctx->clock_source, +			&pipe_ctx->stream_res.pix_clk_params, +			&pipe_ctx->pll_settings); + +	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; + +	resource_build_bit_depth_reduction_params(pipe_ctx->stream, +					&pipe_ctx->stream->bit_depth_params); +	build_clamping_params(pipe_ctx->stream); + +	return DC_OK; +} + +static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, +		struct bit_depth_reduction_params *fmt_bit_depth) +{ +	enum dc_dither_option option = stream->dither_option; +	enum dc_pixel_encoding pixel_encoding = +			stream->timing.pixel_encoding; + +	memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth)); + +	if (option == DITHER_OPTION_DEFAULT) { +		switch (stream->timing.display_color_depth) { +		case COLOR_DEPTH_666: +			option = DITHER_OPTION_SPATIAL6; +			break; +		case COLOR_DEPTH_888: +			option = DITHER_OPTION_SPATIAL8; +			break; +		case COLOR_DEPTH_101010: +			option = DITHER_OPTION_SPATIAL10; +			
break; +		default: +			option = DITHER_OPTION_DISABLE; +		} +	} + +	if (option == DITHER_OPTION_DISABLE) +		return; + +	if (option == DITHER_OPTION_TRUN6) { +		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; +		fmt_bit_depth->flags.TRUNCATE_DEPTH = 0; +	} else if (option == DITHER_OPTION_TRUN8 || +			option == DITHER_OPTION_TRUN8_SPATIAL6 || +			option == DITHER_OPTION_TRUN8_FM6) { +		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; +		fmt_bit_depth->flags.TRUNCATE_DEPTH = 1; +	} else if (option == DITHER_OPTION_TRUN10        || +			option == DITHER_OPTION_TRUN10_SPATIAL6   || +			option == DITHER_OPTION_TRUN10_SPATIAL8   || +			option == DITHER_OPTION_TRUN10_FM8     || +			option == DITHER_OPTION_TRUN10_FM6     || +			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { +		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; +		fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; +	} + +	/* special case - Formatter can only reduce by 4 bits at most. +	 * When reducing from 12 to 6 bits, +	 * HW recommends we use trunc with round mode +	 * (if we did nothing, trunc to 10 bits would be used) +	 * note that any 12->10 bit reduction is ignored prior to DCE8, +	 * as the input was 10 bits. +	 */ +	if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || +			option == DITHER_OPTION_SPATIAL6 || +			option == DITHER_OPTION_FM6) { +		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; +		fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; +		fmt_bit_depth->flags.TRUNCATE_MODE = 1; +	} + +	/* spatial dither +	 * note that spatial modes 1-3 are never used +	 */ +	if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM            || +			option == DITHER_OPTION_SPATIAL6 || +			option == DITHER_OPTION_TRUN10_SPATIAL6      || +			option == DITHER_OPTION_TRUN8_SPATIAL6) { +		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; +		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0; +		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; +		fmt_bit_depth->flags.RGB_RANDOM = +				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; +	} else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM            || +			option == DITHER_OPTION_SPATIAL8 || +			option == DITHER_OPTION_SPATIAL8_FM6        || +			option == DITHER_OPTION_TRUN10_SPATIAL8      || +			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { +		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; +		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1; +		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; +		fmt_bit_depth->flags.RGB_RANDOM = +				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; +	} else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM || +			option == DITHER_OPTION_SPATIAL10 || +			option == DITHER_OPTION_SPATIAL10_FM8 || +			option == DITHER_OPTION_SPATIAL10_FM6) { +		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; +		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2; +		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; +		fmt_bit_depth->flags.RGB_RANDOM = +				(pixel_encoding == PIXEL_ENCODING_RGB) ? 
1 : 0; +	} + +	if (option == DITHER_OPTION_SPATIAL6 || +			option == DITHER_OPTION_SPATIAL8 || +			option == DITHER_OPTION_SPATIAL10) { +		fmt_bit_depth->flags.FRAME_RANDOM = 0; +	} else { +		fmt_bit_depth->flags.FRAME_RANDOM = 1; +	} + +	////////////////////// +	//// temporal dither +	////////////////////// +	if (option == DITHER_OPTION_FM6           || +			option == DITHER_OPTION_SPATIAL8_FM6     || +			option == DITHER_OPTION_SPATIAL10_FM6     || +			option == DITHER_OPTION_TRUN10_FM6     || +			option == DITHER_OPTION_TRUN8_FM6      || +			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { +		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; +		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0; +	} else if (option == DITHER_OPTION_FM8        || +			option == DITHER_OPTION_SPATIAL10_FM8  || +			option == DITHER_OPTION_TRUN10_FM8) { +		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; +		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1; +	} else if (option == DITHER_OPTION_FM10) { +		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; +		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2; +	} + +	fmt_bit_depth->pixel_encoding = pixel_encoding; +} + +bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +{ +	int i; + +	/* Validate DSC config, dsc count validation is already done */ +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; +		struct dc_stream_state *stream = pipe_ctx->stream; +		struct dsc_config dsc_cfg; +		struct pipe_ctx *odm_pipe; +		int opp_cnt = 1; + +		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) +			opp_cnt++; + +		/* Only need to validate top pipe */ +		if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) +			continue; + +		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left +				+ stream->timing.h_border_right) / opp_cnt; +		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top +				+ stream->timing.v_border_bottom; +		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; +		dsc_cfg.color_depth = stream->timing.display_color_depth; +		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false;
+		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+		if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
+			return false;
+	}
+	return true;
+}
+
+enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
+{
+	enum dc_status status = DC_OK;
+	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
+
+	if (!pipe_ctx)
+		return DC_ERROR_UNEXPECTED;
+
+
+	status = build_pipe_hw_param(pipe_ctx);
+
+	return status;
+}
+
+void dml_acquire_dsc(const struct dc *dc,
+			struct resource_context *res_ctx,
+			struct display_stream_compressor **dsc,
+			int pipe_idx)
+{
+	int i;
+	const struct resource_pool *pool = dc->res_pool;
+	struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
+
+	ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
+	*dsc = NULL;
+
+	/* Always do 1-to-1 mapping when the number of DSCs is the same as the number of pipes */
+	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
+		*dsc = pool->dscs[pipe_idx];
+		res_ctx->is_dsc_acquired[pipe_idx] = true;
+		return;
+	}
+
+	/* Return the old DSC to avoid the need to redo the acquisition */
+	if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+		*dsc = dsc_old;
+		res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+		return;
+	}
+
+	/* Find first free DSC */
+	for (i = 0; i < pool->res_cap->num_dsc; i++)
+		if (!res_ctx->is_dsc_acquired[i]) {
+			*dsc = pool->dscs[i];
+			res_ctx->is_dsc_acquired[i] = true;
+			break;
+		}
+}
+
+static bool dml_split_stream_for_mpc_or_odm(
+		const struct dc *dc,
+		struct resource_context *res_ctx,
+		struct pipe_ctx *pri_pipe,
+		struct pipe_ctx *sec_pipe,
+		bool odm)
+{
+	int pipe_idx = sec_pipe->pipe_idx;
+	const struct resource_pool *pool = dc->res_pool;
+
+	*sec_pipe = *pri_pipe;
+
+	sec_pipe->pipe_idx = pipe_idx;
+	sec_pipe->plane_res.mi = pool->mis[pipe_idx];
+	sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
+	sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
+	sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
+	sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
+	sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
+	sec_pipe->stream_res.dsc = NULL;
+	if (odm) {
+		if (pri_pipe->next_odm_pipe) {
+			ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
+			sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
+			sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
+		}
+		if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
+			pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
+			sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
+		}
+		if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
+			pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
+			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
+		}
+		pri_pipe->next_odm_pipe = sec_pipe;
+		sec_pipe->prev_odm_pipe = pri_pipe;
+		ASSERT(sec_pipe->top_pipe == NULL);
+
+		if (!sec_pipe->top_pipe)
+			sec_pipe->stream_res.opp = pool->opps[pipe_idx];
+		else
+			sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
+		if (sec_pipe->stream->timing.flags.DSC == 1) {
+			dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
+			ASSERT(sec_pipe->stream_res.dsc);
+			if (sec_pipe->stream_res.dsc == NULL)
+				return false;
+		}
+	} else {
+		if (pri_pipe->bottom_pipe) {
+			
ASSERT(pri_pipe->bottom_pipe != sec_pipe); +			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe; +			sec_pipe->bottom_pipe->top_pipe = sec_pipe; +		} +		pri_pipe->bottom_pipe = sec_pipe; +		sec_pipe->top_pipe = pri_pipe; + +		ASSERT(pri_pipe->plane_state); +	} + +	return true; +} + +static struct pipe_ctx *dml_find_split_pipe( +		struct dc *dc, +		struct dc_state *context, +		int old_index) +{ +	struct pipe_ctx *pipe = NULL; +	int i; + +	if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) { +		pipe = &context->res_ctx.pipe_ctx[old_index]; +		pipe->pipe_idx = old_index; +	} + +	if (!pipe) +		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { +			if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL +					&& dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { +				if (context->res_ctx.pipe_ctx[i].stream == NULL) { +					pipe = &context->res_ctx.pipe_ctx[i]; +					pipe->pipe_idx = i; +					break; +				} +			} +		} + +	/* +	 * May need to fix pipes getting tossed from 1 opp to another on flip +	 * Add for debugging transient underflow during topology updates: +	 * ASSERT(pipe); +	 */ +	if (!pipe) +		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { +			if (context->res_ctx.pipe_ctx[i].stream == NULL) { +				pipe = &context->res_ctx.pipe_ctx[i]; +				pipe->pipe_idx = i; +				break; +			} +		} + +	return pipe; +} + +static void dml_release_dsc(struct resource_context *res_ctx, +			const struct resource_pool *pool, +			struct display_stream_compressor **dsc) +{ +	int i; + +	for (i = 0; i < pool->res_cap->num_dsc; i++) +		if (pool->dscs[i] == *dsc) { +			res_ctx->is_dsc_acquired[i] = false; +			*dsc = NULL; +			break; +		} +} + +static int dml_get_num_mpc_splits(struct pipe_ctx *pipe) +{ +	int mpc_split_count = 0; +	struct pipe_ctx *other_pipe = pipe->bottom_pipe; + +	while (other_pipe && other_pipe->plane_state == pipe->plane_state) { +		mpc_split_count++; +		other_pipe = other_pipe->bottom_pipe; +	} +	other_pipe = pipe->top_pipe; +	while (other_pipe && other_pipe->plane_state == pipe->plane_state) { +		mpc_split_count++; +		other_pipe = other_pipe->top_pipe; +	} + +	return mpc_split_count; +} + +static bool dml_enough_pipes_for_subvp(struct dc *dc, +		struct dc_state *context) +{ +	int i = 0; +	int num_pipes = 0; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->stream && pipe->plane_state) +			num_pipes++; +	} + +	// Sub-VP only possible if the number of "real" pipes is +	// less than or equal to half the number of available pipes +	if (num_pipes * 2 > dc->res_pool->pipe_count) +		return false; + +	return true; +} + +static int dml_validate_apply_pipe_split_flags( +		struct dc *dc, +		struct dc_state *context, +		int vlevel, +		int *split, +		bool *merge) +{ +	int i, pipe_idx, vlevel_split; +	int plane_count = 0; +	bool force_split = false; +	bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; +	struct vba_vars_st *v = &context->bw_ctx.dml.vba; +	int max_mpc_comb = v->maxMpcComb; + +	if (context->stream_count > 1) { +		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) +			avoid_split = true; +	} else if (dc->debug.force_single_disp_pipe_split) +			force_split = true; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		/** +		 * Workaround for avoiding pipe-split in cases where we'd split +		 * planes that are too small, resulting in splits that aren't +		 * valid for the scaler. 
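+		 * Illustration (numbers are illustrative only, not from this
+		 * patch): splitting a 16-pixel-wide plane 2-way would leave
+		 * each DPP an 8-pixel-wide recout, narrower than the scaler
+		 * can reliably be programmed for, hence the <= 16 checks
+		 * just below.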
+		 */ +		if (pipe->plane_state && +		    (pipe->plane_state->dst_rect.width <= 16 || +		     pipe->plane_state->dst_rect.height <= 16 || +		     pipe->plane_state->src_rect.width <= 16 || +		     pipe->plane_state->src_rect.height <= 16)) +			avoid_split = true; + +		/* TODO: fix dc bugs and remove this split threshold thing */ +		if (pipe->stream && !pipe->prev_odm_pipe && +				(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) +			++plane_count; +	} +	if (plane_count > dc->res_pool->pipe_count / 2) +		avoid_split = true; + +	/* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */ +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +		struct dc_crtc_timing timing; + +		if (!pipe->stream) +			continue; +		else { +			timing = pipe->stream->timing; +			if (timing.h_border_left + timing.h_border_right +					+ timing.v_border_top + timing.v_border_bottom > 0) { +				avoid_split = true; +				break; +			} +		} +	} + +	/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ +	if (avoid_split) { +		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { +			if (!context->res_ctx.pipe_ctx[i].stream) +				continue; + +			for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) +				if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 && +						v->ModeSupport[vlevel][0]) +					break; +			/* Impossible to not split this pipe */ +			if (vlevel > context->bw_ctx.dml.soc.num_states) +				vlevel = vlevel_split; +			else +				max_mpc_comb = 0; +			pipe_idx++; +		} +		v->maxMpcComb = max_mpc_comb; +	} + +	/* Split loop sets which pipe should be split based on dml outputs and dc flags */ +	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +		int pipe_plane = v->pipe_plane[pipe_idx]; +		bool split4mpc = context->stream_count == 1 && plane_count == 1 +				&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4; + +		if (!context->res_ctx.pipe_ctx[i].stream) +			continue; + +		if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4) +			split[i] = 4; +		else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2) +				split[i] = 2; + +		if ((pipe->stream->view_format == +				VIEW_3D_FORMAT_SIDE_BY_SIDE || +				pipe->stream->view_format == +				VIEW_3D_FORMAT_TOP_AND_BOTTOM) && +				(pipe->stream->timing.timing_3d_format == +				TIMING_3D_FORMAT_TOP_AND_BOTTOM || +				 pipe->stream->timing.timing_3d_format == +				TIMING_3D_FORMAT_SIDE_BY_SIDE)) +			split[i] = 2; +		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { +			split[i] = 2; +			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; +		} +		if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) { +			split[i] = 4; +			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1; +		} +		/*420 format workaround*/ +		if (pipe->stream->timing.h_addressable > 7680 && +				pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { +			split[i] = 4; +		} + +		v->ODMCombineEnabled[pipe_plane] = +			v->ODMCombineEnablePerState[vlevel][pipe_plane]; + +		if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { +			if (dml_get_num_mpc_splits(pipe) == 1) { +				/*If need split for mpc but 2 way split already*/ +				if (split[i] == 4) +					split[i] = 2; /* 2 -> 4 MPC */ +				else if (split[i] == 
2) +					split[i] = 0; /* 2 -> 2 MPC */ +				else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) +					merge[i] = true; /* 2 -> 1 MPC */ +			} else if (dml_get_num_mpc_splits(pipe) == 3) { +				/*If need split for mpc but 4 way split already*/ +				if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) +						|| !pipe->bottom_pipe)) { +					merge[i] = true; /* 4 -> 2 MPC */ +				} else if (split[i] == 0 && pipe->top_pipe && +						pipe->top_pipe->plane_state == pipe->plane_state) +					merge[i] = true; /* 4 -> 1 MPC */ +				split[i] = 0; +			} else if (dml_get_num_mpc_splits(pipe)) { +				/* ODM -> MPC transition */ +				if (pipe->prev_odm_pipe) { +					split[i] = 0; +					merge[i] = true; +				} +			} +		} else { +			if (dml_get_num_mpc_splits(pipe) == 1) { +				/*If need split for odm but 2 way split already*/ +				if (split[i] == 4) +					split[i] = 2; /* 2 -> 4 ODM */ +				else if (split[i] == 2) +					split[i] = 0; /* 2 -> 2 ODM */ +				else if (pipe->prev_odm_pipe) { +					ASSERT(0); /* NOT expected yet */ +					merge[i] = true; /* exit ODM */ +				} +			} else if (dml_get_num_mpc_splits(pipe) == 3) { +				/*If need split for odm but 4 way split already*/ +				if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) +						|| !pipe->next_odm_pipe)) { +					ASSERT(0); /* NOT expected yet */ +					merge[i] = true; /* 4 -> 2 ODM */ +				} else if (split[i] == 0 && pipe->prev_odm_pipe) { +					ASSERT(0); /* NOT expected yet */ +					merge[i] = true; /* exit ODM */ +				} +				split[i] = 0; +			} else if (dml_get_num_mpc_splits(pipe)) { +				/* MPC -> ODM transition */ +				ASSERT(0); /* NOT expected yet */ +				if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { +					split[i] = 0; +					merge[i] = true; +				} +			} +		} + +		/* Adjust dppclk when split is forced, do not bother with dispclk */ +		if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) +			v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2; +		pipe_idx++; +	} + +	return vlevel; +} + +static void dml_set_phantom_stream_timing(struct dc *dc, +		struct dc_state *context, +		struct pipe_ctx *ref_pipe, +		struct dc_stream_state *phantom_stream) +{ +	// phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width +	uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 + +					dc->caps.subvp_fw_processing_delay_us + +					dc->caps.subvp_pstate_allow_width_us; +	uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) * +					(ref_pipe->stream->timing.pix_clk_100hz * 100) / +					(double)ref_pipe->stream->timing.h_total; +	uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start; + +	phantom_stream->dst.y = 0; +	phantom_stream->dst.height = phantom_vactive; +	phantom_stream->src.y = 0; +	phantom_stream->src.height = phantom_vactive; + +	phantom_stream->timing.v_addressable = phantom_vactive; +	phantom_stream->timing.v_front_porch = 1; +	phantom_stream->timing.v_total = phantom_stream->timing.v_addressable + +						phantom_stream->timing.v_front_porch + +						phantom_stream->timing.v_sync_width + +						phantom_bp; +} + +static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc, +		struct dc_state *context, +		struct pipe_ctx *ref_pipe) +{ +	struct dc_stream_state *phantom_stream = NULL; + +	phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); +	phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; +	phantom_stream->dpms_off = true; 
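+	/*
+	 * Sketch of the sizing done by dml_set_phantom_stream_timing() above
+	 * (illustrative numbers, not from this patch): with ~100us of
+	 * blackout-plus-allow time on a 594MHz pixel clock and an h_total of
+	 * 4400, phantom vactive works out to roughly
+	 * 0.0001 * 594000000 / 4400 ~= 13 lines. The phantom stream is
+	 * virtual (dpms off) and only needs enough vactive to cover the
+	 * blackout and p-state allow window.
+	 */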
+	phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; +	phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; +	ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; +	ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + +	/* stream has limited viewport and small timing */ +	memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing)); +	memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src)); +	memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); +	dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream); + +	dc_add_stream_to_ctx(dc, context, phantom_stream); +	dc->hwss.apply_ctx_to_hw(dc, context); +	return phantom_stream; +} + +static void dml_enable_phantom_plane(struct dc *dc, +		struct dc_state *context, +		struct dc_stream_state *phantom_stream, +		struct pipe_ctx *main_pipe) +{ +	struct dc_plane_state *phantom_plane = NULL; +	struct dc_plane_state *prev_phantom_plane = NULL; +	struct pipe_ctx *curr_pipe = main_pipe; + +	while (curr_pipe) { +		if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) +			phantom_plane = prev_phantom_plane; +		else +			phantom_plane = dc_create_plane_state(dc); + +		memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); +		memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, +				sizeof(phantom_plane->scaling_quality)); +		memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect)); +		memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect)); +		memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect)); +		memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size, +				sizeof(phantom_plane->plane_size)); +		memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info, +				sizeof(phantom_plane->tiling_info)); +		memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc)); +		/* Currently compat_level is undefined in dc_state +		* phantom_plane->compat_level = curr_pipe->plane_state->compat_level; +		*/ +		phantom_plane->format = curr_pipe->plane_state->format; +		phantom_plane->rotation = curr_pipe->plane_state->rotation; +		phantom_plane->visible = curr_pipe->plane_state->visible; + +		/* Shadow pipe has small viewport. 
*/ +		phantom_plane->clip_rect.y = 0; +		phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; + +		dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); + +		curr_pipe = curr_pipe->bottom_pipe; +		prev_phantom_plane = phantom_plane; +	} +} + +static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context) +{ +	int i = 0; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +		struct dc_stream_state *ref_stream = pipe->stream; +		// Only construct phantom stream for top pipes that have plane enabled +		if (!pipe->top_pipe && pipe->plane_state && pipe->stream && +				pipe->stream->mall_stream_config.type == SUBVP_NONE) { +			struct dc_stream_state *phantom_stream = NULL; + +			phantom_stream = dml_enable_phantom_stream(dc, context, pipe); +			dml_enable_phantom_plane(dc, context, phantom_stream, pipe); +		} +	} + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->plane_state && pipe->stream && +				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +			pipe->stream->use_dynamic_meta = false; +			pipe->plane_state->flip_immediate = false; +			if (!resource_build_scaling_params(pipe)) { +				// Log / remove phantom pipes since failed to build scaling params +			} +		} +	} +} + +static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context) +{ +	int i; +	bool removed_pipe = false; + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +		// build scaling params for phantom pipes +		if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { +			dc_rem_all_planes_for_stream(dc, pipe->stream, context); +			dc_remove_stream_from_ctx(dc, context, pipe->stream); +			removed_pipe = true; +		} + +		// Clear all phantom stream info +		if (pipe->stream) { +			pipe->stream->mall_stream_config.type = SUBVP_NONE; +			pipe->stream->mall_stream_config.paired_stream = NULL; +		} +	} +	if (removed_pipe) +		dc->hwss.apply_ctx_to_hw(dc, context); +} + +/* + * If the input state contains no upstream planes for a particular pipe (i.e. 
only timing) + * we need to populate some "conservative" plane information as DML cannot handle "no planes" + */ +static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe) +{ +	pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled; +	pipe->src.source_scan = dm_horz; +	pipe->src.sw_mode = dm_sw_4kb_s; +	pipe->src.macro_tile_size = dm_64k_tile; +	pipe->src.viewport_width = timing->h_addressable; +	if (pipe->src.viewport_width > 1920) +		pipe->src.viewport_width = 1920; +	pipe->src.viewport_height = timing->v_addressable; +	if (pipe->src.viewport_height > 1080) +		pipe->src.viewport_height = 1080; +	pipe->src.surface_height_y = pipe->src.viewport_height; +	pipe->src.surface_width_y = pipe->src.viewport_width; +	pipe->src.surface_height_c = pipe->src.viewport_height; +	pipe->src.surface_width_c = pipe->src.viewport_width; +	pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256; +	pipe->src.source_format = dm_444_32; +	pipe->dest.recout_width = pipe->src.viewport_width; +	pipe->dest.recout_height = pipe->src.viewport_height; +	pipe->dest.full_recout_width = pipe->dest.recout_width; +	pipe->dest.full_recout_height = pipe->dest.recout_height; +	pipe->scale_ratio_depth.lb_depth = dm_lb_16; +	pipe->scale_ratio_depth.hscl_ratio = 1.0; +	pipe->scale_ratio_depth.vscl_ratio = 1.0; +	pipe->scale_ratio_depth.scl_enable = 0; +	pipe->scale_taps.htaps = 1; +	pipe->scale_taps.vtaps = 1; +	pipe->dest.vtotal_min = timing->v_total; +	pipe->dest.vtotal_max = timing->v_total; + +	if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) { +		pipe->src.viewport_width /= 2; +		pipe->dest.recout_width /= 2; +	} else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) { +		pipe->src.viewport_width /= 4; +		pipe->dest.recout_width /= 4; +	} + +	pipe->src.dcc = false; +	pipe->src.dcc_rate = 1; +} + +/* + * If the pipe is not blending (i.e. 
pipe_ctx->top pipe == null) then its + * hsplit group is equal to its own pipe ID + * Otherwise, all pipes part of the same blending tree have the same hsplit group + * ID as the top most pipe + * + * If the pipe ctx is ODM combined, then similar logic follows + */ +static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ +	e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + +	if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state +			== dc_pipe_ctx->plane_state) { +		struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe; +		int split_idx = 0; + +		while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state +				== dc_pipe_ctx->plane_state) { +			first_pipe = first_pipe->top_pipe; +			split_idx++; +		} + +		/* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */ +		if (split_idx == 0) +			e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; +		else if (split_idx == 1) +			e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; +		else if (split_idx == 2) +			e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx; + +	} else if (dc_pipe_ctx->prev_odm_pipe) { +		struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe; + +		while (first_pipe->prev_odm_pipe) +			first_pipe = first_pipe->prev_odm_pipe; +		e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; +	} +} + +static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale) +{ +	const struct dc_plane_state *pln = dc_pipe_ctx->plane_state; +	const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data; + +	e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate; +	e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln) +			|| (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln) +			|| e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled; + +	/* stereo is not split */ +	if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || +		pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { +		e2e_pipe->pipe.src.is_hsplit = false; +		e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; +	} + +	e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 +			|| pln->rotation == ROTATION_ANGLE_270 ? 
dm_vert : dm_horz; +	e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y; +	e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y; +	e2e_pipe->pipe.src.viewport_width = scl->viewport.width; +	e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width; +	e2e_pipe->pipe.src.viewport_height = scl->viewport.height; +	e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height; +	e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width; +	e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height; +	e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width; +	e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height; +	e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width; +	e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height; + +	if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA +			|| pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { +		e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; +		e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; +		e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; +		e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; +	} else { +		e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; +		e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; +	} +	e2e_pipe->pipe.src.dcc = pln->dcc.enable; +	e2e_pipe->pipe.src.dcc_rate = 1; +	e2e_pipe->pipe.dest.recout_width = scl->recout.width; +	e2e_pipe->pipe.dest.recout_height = scl->recout.height; +	e2e_pipe->pipe.dest.full_recout_height = scl->recout.height; +	e2e_pipe->pipe.dest.full_recout_width = scl->recout.width; +	if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1) +		e2e_pipe->pipe.dest.full_recout_width *= 2; +	else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1) +		e2e_pipe->pipe.dest.full_recout_width *= 4; +	else { +		struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe; + +		while (split_pipe && split_pipe->plane_state == pln) { +			e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; +			split_pipe = split_pipe->bottom_pipe; +		} +		split_pipe = dc_pipe_ctx->top_pipe; +		while (split_pipe && split_pipe->plane_state == pln) { +			e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; +			split_pipe = split_pipe->top_pipe; +		} +	} + +	e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16; +	e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); +	e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); +	e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); +	e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); +	e2e_pipe->pipe.scale_ratio_depth.scl_enable = +			scl->ratios.vert.value != dc_fixpt_one.value +			|| scl->ratios.horz.value != dc_fixpt_one.value +			|| scl->ratios.vert_c.value != dc_fixpt_one.value +			|| scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ +			|| always_scale; /*support always scale*/ +	e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps; +	e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c; +	e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps; +	e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; + +	/* Currently compat_level is not defined. 
Commenting it until further resolution +	 * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) { +		swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, +				&e2e_pipe->pipe.src.sw_mode); +		e2e_pipe->pipe.src.macro_tile_size = +				swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); +	} else { +		gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode, +				pln->compat_level, +				&e2e_pipe->pipe.src.sw_mode); +		e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile; +	}*/ + +	e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format); +} + +static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ +	/* +	* For graphic plane, cursor number is 1, nv12 is 0 +	* bw calculations due to cursor on/off +	*/ +	if (dc_pipe_ctx->plane_state && +			(dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || +			dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)) +		e2e_pipe->pipe.src.num_cursors = 0; +	else +		e2e_pipe->pipe.src.num_cursors = 1; + +	e2e_pipe->pipe.src.cur0_src_width = 256; +	e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit; +} + +static int populate_dml_pipes_from_context_base( +		struct dc *dc, +		struct dc_state *context, +		display_e2e_pipe_params_st *pipes, +		bool fast_validate) +{ +	int pipe_cnt, i; +	bool synchronized_vblank = true; +	struct resource_context *res_ctx = &context->res_ctx; + +	for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { +		if (!res_ctx->pipe_ctx[i].stream) +			continue; + +		if (pipe_cnt < 0) { +			pipe_cnt = i; +			continue; +		} + +		if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream) +			continue; + +		if (dc->debug.disable_timing_sync || +			(!resource_are_streams_timing_synchronizable( +				res_ctx->pipe_ctx[pipe_cnt].stream, +				res_ctx->pipe_ctx[i].stream) && +			!resource_are_vblanks_synchronizable( +				res_ctx->pipe_ctx[pipe_cnt].stream, +				res_ctx->pipe_ctx[i].stream))) { +			synchronized_vblank = false; +			break; +		} +	} + +	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { +		struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; + +		struct audio_check aud_check = {0}; +		if (!res_ctx->pipe_ctx[i].stream) +			continue; + +		/* todo: +		pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; +		pipes[pipe_cnt].pipe.src.dcc = 0; +		pipes[pipe_cnt].pipe.src.vm = 0;*/ + +		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + +		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; +		/* todo: rotation?*/ +		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; +		if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { +			pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; +			/* 1/2 vblank */ +			pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = +				(timing->v_total - timing->v_addressable +					- timing->v_border_top - timing->v_border_bottom) / 2; +			/* 36 bytes dp, 32 hdmi */ +			pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = +				dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 
36 : 32; +		} +		pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; + +		dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest); +		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; +		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; + +		pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; + +		pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]); + +		populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + +		pipes[pipe_cnt].dout.dp_lanes = 4; +		pipes[pipe_cnt].dout.is_virtual = 0; +		pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal); +		if (pipes[pipe_cnt].dout.output_type < 0) { +			pipes[pipe_cnt].dout.output_type = dm_dp; +			pipes[pipe_cnt].dout.is_virtual = 1; +		} + +		populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout); + +		if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) +			pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; + +		/* todo: default max for now, until there is logic reflecting this in dc*/ +		pipes[pipe_cnt].dout.dsc_input_bpc = 12; +		/*fill up the audio sample rate (unit in kHz)*/ +		get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check); +		pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000; + +		populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + +		if (!res_ctx->pipe_ctx[i].plane_state) { +			populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe); +		} else { +			populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale); +		} + +		pipe_cnt++; +	} + +	/* populate writeback information */ +	if (dc->res_pool) +		dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); + +	return pipe_cnt; +} + +static int dml_populate_dml_pipes_from_context( +	struct dc *dc, struct dc_state *context, +	display_e2e_pipe_params_st *pipes, +	bool fast_validate) +{ +	int i, pipe_cnt; +	struct resource_context *res_ctx = &context->res_ctx; +	struct pipe_ctx *pipe; + +	populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate); + +	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { +		struct dc_crtc_timing *timing; + +		if (!res_ctx->pipe_ctx[i].stream) +			continue; +		pipe = &res_ctx->pipe_ctx[i]; +		timing = &pipe->stream->timing; + +		pipes[pipe_cnt].pipe.src.gpuvm = true; +		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; +		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; +		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + +		pipes[pipe_cnt].dout.dsc_input_bpc = 0; +		if (pipes[pipe_cnt].dout.dsc_enable) { +			switch (timing->display_color_depth) { +			case COLOR_DEPTH_888: +				pipes[pipe_cnt].dout.dsc_input_bpc = 8; +				break; +			case COLOR_DEPTH_101010: +				pipes[pipe_cnt].dout.dsc_input_bpc = 10; +				break; +			case COLOR_DEPTH_121212: +				pipes[pipe_cnt].dout.dsc_input_bpc = 12; +				break; +			default: +				ASSERT(0); +				break; +			} +		} +		pipe_cnt++; +	} +	dc->config.enable_4to1MPC = false; +	if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { +		if (is_dual_plane(pipe->plane_state->format) +				&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { +			
dc->config.enable_4to1MPC = true; +		} else if (!is_dual_plane(pipe->plane_state->format)) { +			context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; +			pipes[0].pipe.src.unbounded_req_mode = true; +		} +	} + +	return pipe_cnt; +} + +static void dml_full_validate_bw_helper(struct dc *dc, +		struct dc_state *context, +		display_e2e_pipe_params_st *pipes, +		int *vlevel, +		int *split, +		bool *merge, +		int *pipe_cnt) +{ +	struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + +	/* +	 * DML favors voltage over p-state, but we're more interested in +	 * supporting p-state over voltage. We can't support p-state in +	 * prefetch mode > 0 so try capping the prefetch mode to start. +	 */ +	context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = +		dm_allow_self_refresh_and_mclk_switch; +	*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); +	/* This may adjust vlevel and maxMpcComb */ +	if (*vlevel < context->bw_ctx.dml.soc.num_states) +		*vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); + +	/* Conditions for setting up phantom pipes for SubVP: +	 * 1. Not force disable SubVP +	 * 2. Full update (i.e. !fast_validate) +	 * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) +	 * 4. Display configuration passes validation +	 * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) +	 */ +	if (!dc->debug.force_disable_subvp && +			dml_enough_pipes_for_subvp(dc, context) && +			*vlevel < context->bw_ctx.dml.soc.num_states && +			(vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || +			dc->debug.force_subvp_mclk_switch)) { + +		dml_add_phantom_pipes(dc, context); + +		 /* Create input to DML based on new context which includes phantom pipes +		  * TODO: Input to DML should mark which pipes are phantom +		  */ +		*pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); +		*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); +		if (*vlevel < context->bw_ctx.dml.soc.num_states) { +			memset(split, 0, MAX_PIPES * sizeof(*split)); +			memset(merge, 0, MAX_PIPES * sizeof(*merge)); +			*vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); +		} + +		// If SubVP pipe config is unsupported (or cannot be used for UCLK switching) +		// remove phantom pipes and repopulate dml pipes +		if (*vlevel == context->bw_ctx.dml.soc.num_states || +				vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { +			dml_remove_phantom_pipes(dc, context); +			*pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); +		} +	} +} + +static void dcn20_adjust_adaptive_sync_v_startup( +		const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ +	struct dc_crtc_timing patched_crtc_timing; +	uint32_t asic_blank_end   = 0; +	uint32_t asic_blank_start = 0; +	uint32_t newVstartup	  = 0; + +	patched_crtc_timing = *dc_crtc_timing; + +	if (patched_crtc_timing.flags.INTERLACE == 1) { +		if (patched_crtc_timing.v_front_porch < 2) +			patched_crtc_timing.v_front_porch = 2; +	} else { +		if (patched_crtc_timing.v_front_porch < 1) +			patched_crtc_timing.v_front_porch = 1; +	} + +	/* blank_start = frame end - front porch */ +	asic_blank_start = patched_crtc_timing.v_total - +					patched_crtc_timing.v_front_porch; + +	/* blank_end = blank_start - active */ +	asic_blank_end = asic_blank_start - +					
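/*
 * Worked example with illustrative numbers only: for v_total = 1125,
 * v_front_porch = 3, v_addressable = 1080 and no borders:
 *   asic_blank_start = 1125 - 3 = 1122
 *   asic_blank_end   = 1122 - 0 - 1080 - 0 = 42
 *   newVstartup      = 42 + (1125 - 1122) = 45
 * The max() at the end means this helper can only raise, never lower,
 * the caller's vstartup_start.
 */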
patched_crtc_timing.v_border_bottom - +					patched_crtc_timing.v_addressable - +					patched_crtc_timing.v_border_top; + +	newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + +	*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start); +} + +static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ +	return (pipe_ctx->stream_res.hpo_dp_stream_enc && +			pipe_ctx->link_res.hpo_dp_link_enc && +			dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) +{ +	int i; +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		if (!context->res_ctx.pipe_ctx[i].stream) +			continue; +#if defined (CONFIG_DRM_AMD_DC_DP2_0) +		if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) +			return true; +#endif +	} +	return false; +} + +static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +{ +	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) { +		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; +		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; +		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; +	} +} + +static bool dml_internal_validate( +		struct dc *dc, +		struct dc_state *context, +		display_e2e_pipe_params_st *pipes, +		int *pipe_cnt_out, +		int *vlevel_out, +		bool fast_validate) +{ +	bool out = false; +	bool repopulate_pipes = false; +	int split[MAX_PIPES] = { 0 }; +	bool merge[MAX_PIPES] = { false }; +	bool newly_split[MAX_PIPES] = { false }; +	int pipe_cnt, i, pipe_idx, vlevel; +	struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + +	ASSERT(pipes); +	if (!pipes) +		return false; + +	// For each full update, remove all existing phantom pipes first +	dml_remove_phantom_pipes(dc, context); + +	dml_update_soc_for_wm_a(dc, context); + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->plane_state) { +			// On initial pass through DML, we intend to use MALL for SS on all +			// (non-PSR) surfaces with none using MALL for P-State +			// 'mall_plane_config': is not a member of 'dc_plane_state' - commenting it out until mall_plane_config gets supported in dc_plane_state +			//if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) +			//	pipe->plane_state->mall_plane_config.use_mall_for_ss = true; +		} +	} +	pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + +	if (!pipe_cnt) { +		out = true; +		goto validate_out; +	} + +	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); + +	if (!fast_validate) { +		dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); +	} + +	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || +			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { +		/* +		 * If mode is unsupported or there's still no p-state support then +		 * fall back to favoring voltage. +		 * +		 * We don't actually support prefetch mode 2, so require that we +		 * at least support prefetch mode 1. 
+		 */ +		context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = +			dm_allow_self_refresh; + +		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); +		if (vlevel < context->bw_ctx.dml.soc.num_states) { +			memset(split, 0, sizeof(split)); +			memset(merge, 0, sizeof(merge)); +			vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); +		} +	} + +	dml_log_mode_support_params(&context->bw_ctx.dml); + +	if (vlevel == context->bw_ctx.dml.soc.num_states) +		goto validate_fail; + +	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +		struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + +		if (!pipe->stream) +			continue; + +		/* We only support full screen mpo with ODM */ +		if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled +				&& pipe->plane_state && mpo_pipe +				&& memcmp(&mpo_pipe->plane_res.scl_data.recout, +						&pipe->plane_res.scl_data.recout, +						sizeof(struct rect)) != 0) { +			ASSERT(mpo_pipe->plane_state != pipe->plane_state); +			goto validate_fail; +		} +		pipe_idx++; +	} + +	/* merge pipes if necessary */ +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		/*skip pipes that don't need merging*/ +		if (!merge[i]) +			continue; + +		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */ +		if (pipe->prev_odm_pipe) { +			/*split off odm pipe*/ +			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe; +			if (pipe->next_odm_pipe) +				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe; + +			pipe->bottom_pipe = NULL; +			pipe->next_odm_pipe = NULL; +			pipe->plane_state = NULL; +			pipe->stream = NULL; +			pipe->top_pipe = NULL; +			pipe->prev_odm_pipe = NULL; +			if (pipe->stream_res.dsc) +				dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc); +			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); +			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); +			repopulate_pipes = true; +		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { +			struct pipe_ctx *top_pipe = pipe->top_pipe; +			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; + +			top_pipe->bottom_pipe = bottom_pipe; +			if (bottom_pipe) +				bottom_pipe->top_pipe = top_pipe; + +			pipe->top_pipe = NULL; +			pipe->bottom_pipe = NULL; +			pipe->plane_state = NULL; +			pipe->stream = NULL; +			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); +			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); +			repopulate_pipes = true; +		} else +			ASSERT(0); /* Should never try to merge master pipe */ + +	} + +	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; +		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; +		struct pipe_ctx *hsplit_pipe = NULL; +		bool odm; +		int old_index = -1; + +		if (!pipe->stream || newly_split[i]) +			continue; + +		pipe_idx++; +		odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled; + +		if (!pipe->plane_state && !odm) +			continue; + +		if (split[i]) { +			if (odm) { +				if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe) +					old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; +				else if (old_pipe->next_odm_pipe) +					old_index = old_pipe->next_odm_pipe->pipe_idx; +			} else { +				if (split[i] 
== 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && +						old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) +					old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx; +				else if (old_pipe->bottom_pipe && +						old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) +					old_index = old_pipe->bottom_pipe->pipe_idx; +			} +			hsplit_pipe = dml_find_split_pipe(dc, context, old_index); +			ASSERT(hsplit_pipe); +			if (!hsplit_pipe) +				goto validate_fail; + +			if (!dml_split_stream_for_mpc_or_odm( +					dc, &context->res_ctx, +					pipe, hsplit_pipe, odm)) +				goto validate_fail; + +			newly_split[hsplit_pipe->pipe_idx] = true; +			repopulate_pipes = true; +		} +		if (split[i] == 4) { +			struct pipe_ctx *pipe_4to1; + +			if (odm && old_pipe->next_odm_pipe) +				old_index = old_pipe->next_odm_pipe->pipe_idx; +			else if (!odm && old_pipe->bottom_pipe && +						old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) +				old_index = old_pipe->bottom_pipe->pipe_idx; +			else +				old_index = -1; +			pipe_4to1 = dml_find_split_pipe(dc, context, old_index); +			ASSERT(pipe_4to1); +			if (!pipe_4to1) +				goto validate_fail; +			if (!dml_split_stream_for_mpc_or_odm( +					dc, &context->res_ctx, +					pipe, pipe_4to1, odm)) +				goto validate_fail; +			newly_split[pipe_4to1->pipe_idx] = true; + +			if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe +					&& old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe) +				old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; +			else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && +					old_pipe->bottom_pipe->bottom_pipe->bottom_pipe && +					old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) +				old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx; +			else +				old_index = -1; +			pipe_4to1 = dml_find_split_pipe(dc, context, old_index); +			ASSERT(pipe_4to1); +			if (!pipe_4to1) +				goto validate_fail; +			if (!dml_split_stream_for_mpc_or_odm( +					dc, &context->res_ctx, +					hsplit_pipe, pipe_4to1, odm)) +				goto validate_fail; +			newly_split[pipe_4to1->pipe_idx] = true; +		} +		if (odm) +			dml_build_mapped_resource(dc, context, pipe->stream); +	} + +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + +		if (pipe->plane_state) { +			if (!resource_build_scaling_params(pipe)) +				goto validate_fail; +		} +	} + +	/* Actual dsc count per stream dsc validation*/ +	if (!dml_validate_dsc(dc, context)) { +		vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; +		goto validate_fail; +	} + +	if (repopulate_pipes) +		pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); +	*vlevel_out = vlevel; +	*pipe_cnt_out = pipe_cnt; + +	out = true; +	goto validate_out; + +validate_fail: +	out = false; + +validate_out: +	return out; +} + +static void dml_calculate_dlg_params( +		struct dc *dc, struct dc_state *context, +		display_e2e_pipe_params_st *pipes, +		int pipe_cnt, +		int vlevel) +{ +	int i, pipe_idx; +	int plane_count; + +	/* Writeback MCIF_WB arbitration parameters */ +	if (dc->res_pool) +		dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); + +	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; +	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; +	
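/*
 * Added unit note: DML/VBA reports clocks as doubles in MHz while
 * dc_clocks stores integer kHz, hence the *1000 conversions throughout
 * this block. DRAMSpeed is a data rate in MT/s; the /16 in the dramclk
 * line below turns it into the memclk value dc expects (illustrative:
 * 16000 MT/s -> 1000000 kHz). The 16:1 factor is read off this formula,
 * not taken from documentation.
 */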
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; +	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; +	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; +	context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; +	context->bw_ctx.bw.dcn.clk.p_state_change_support = +		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] +							!= dm_dram_clock_change_unsupported; + +	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; +	/* 'z9_support': is not a member of 'dc_clocks' - Commenting out until we have this support in dc_clocks +	 * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ? +			DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW; +	*/ +	plane_count = 0; +	for (i = 0; i < dc->res_pool->pipe_count; i++) { +		if (context->res_ctx.pipe_ctx[i].plane_state) +			plane_count++; +	} + +	/* Commented out as per above error for now. +	if (plane_count == 0) +		context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW; +	*/ +	context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); +	/* TODO: Uncomment the below line and make changes +	 * as per DML nomenclature once it is available. +	 * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support; +	 */ + +	if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) +		context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; + +	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { +		if (!context->res_ctx.pipe_ctx[i].stream) +			continue; +		pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); +		pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); +		pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); +		pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); +		if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { +			// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests +			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; +			context->res_ctx.pipe_ctx[i].unbounded_req = false; +		} else { +			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes; +			context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode; +		} + +		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) +			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; +		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = +						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; +		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; +		pipe_idx++; +	} +	/* save an original dppclk copy */ +	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; +	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; +	context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; +	context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; +	context->bw_ctx.bw.dcn.compbuf_size_kb = 
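/*
 * Added note: the compressed buffer is whatever remains of the return
 * buffer once one DET allocation has been carved out per active pipe.
 * Illustrative numbers only: a 1792 KB return buffer with two pipes at
 * 192 KB DET each would leave 1792 - 2 * 192 = 1408 KB of compbuf.
 */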
context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes +						- context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx; + +	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { +		bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; + +		if (!context->res_ctx.pipe_ctx[i].stream) +			continue; + +		context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, +				&context->res_ctx.pipe_ctx[i].dlg_regs, +				&context->res_ctx.pipe_ctx[i].ttu_regs, +				pipes, +				pipe_cnt, +				pipe_idx, +				cstate_en, +				context->bw_ctx.bw.dcn.clk.p_state_change_support, +				false, false, true); + +		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, +				&context->res_ctx.pipe_ctx[i].rq_regs, +				&pipes[pipe_idx].pipe); +		pipe_idx++; +	} +} + +static void dml_calculate_wm_and_dlg( +		struct dc *dc, struct dc_state *context, +		display_e2e_pipe_params_st *pipes, +		int pipe_cnt, +		int vlevel) +{ +	int i, pipe_idx, vlevel_temp = 0; + +	double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz; +	double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; +	unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; +	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != +			dm_dram_clock_change_unsupported; + +	/* Set B: +	 * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present, +	 * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark +	 * calculations to cover bootup clocks. +	 * DCFCLK: soc.clock_limits[2] when available +	 * UCLK: soc.clock_limits[2] when available +	 */ +	if (context->bw_ctx.dml.soc.num_states > 2) { +		vlevel_temp = 2; +		dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; +	} else +		dcfclk = 615; //DCFCLK Vmin_lv + +	pipes[0].clks_cfg.voltage = vlevel_temp; +	pipes[0].clks_cfg.dcfclk_mhz = dcfclk; +	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + +	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) { +		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us; +		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us; +		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us; +	} +	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, 
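/*
 * Added unit note: the get_wm_* helpers return microseconds, so the
 * *1000 on each line stores nanoseconds in the watermark set. The two
 * fraction-of-urgent-bandwidth values are dimensionless; the same *1000
 * appears to keep them in a x1000 fixed-point form (inferred from usage
 * here, not from DML documentation).
 */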
pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	//context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	//context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + +	/* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ +	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4; +	context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8; + +	/* Set D: +	 * All clocks min. +	 * DCFCLK: Min, as reported by PM FW when available +	 * UCLK  : Min, as reported by PM FW when available +	 * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) +	 */ + +	if (context->bw_ctx.dml.soc.num_states > 2) { +		vlevel_temp = 0; +		dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; +	} else +		dcfclk = 615; //DCFCLK Vmin_lv + +	pipes[0].clks_cfg.voltage = vlevel_temp; +	pipes[0].clks_cfg.dcfclk_mhz = dcfclk; +	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + +	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) { +		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us; +		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us; +		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us; +	} +	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	//context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	//context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + +	/* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ +	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4; +	
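/*
 * Added arithmetic note: with the placeholder scaling used here, a
 * pstate_change_ns of e.g. 80000 ns yields 20000 ns for
 * fclk_pstate_change_ns (/4, above) and 10000 ns for usr_retraining_ns
 * (/8, below); both are stopgaps until DML supplies real values, as the
 * existing comment says.
 */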
context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8; + +	/* Set C, for Dummy P-State: +	 * All clocks min. +	 * DCFCLK: Min, as reported by PM FW, when available +	 * UCLK  : Min, as reported by PM FW, when available +	 * pstate latency as per UCLK state dummy pstate latency +	 */ +	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { +		unsigned int min_dram_speed_mts_margin = 160; + +		if ((!pstate_en)) +			min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16; + +		/* find the largest table entry whose dram speed is below the current one (plus margin); anything below DPM0 still uses the DPM0 entry */ +		for (i = 3; i > 0; i--) +			if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) +				break; + +		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; +		context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; +		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; +		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; +	} +	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	//context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	//context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + +	/* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ +	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4; +	context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8; + +	if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) { +		/* The only difference between A and C is p-state latency; if p-state is not supported +		 * with the full p-state latency we want to calculate DLG based on the dummy p-state latency. +		 * Set A's p-state watermark was previously set to 0 when p-state was unsupported; keep that behaviour here for now. 
+		 */ +		context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c; +		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; +	} else { +		/* Set A: +		 * All clocks min. +		 * DCFCLK: Min, as reported by PM FW, when available +		 * UCLK: Min, as reported by PM FW, when available +		 */ +		dml_update_soc_for_wm_a(dc, context); +		context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +		context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +		context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +	} + +	pipes[0].clks_cfg.voltage = vlevel; +	pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation; +	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + +	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { +		if (!context->res_ctx.pipe_ctx[i].stream) +			continue; + +		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); +		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + +		if (dc->config.forced_clocks) { +			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; +			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; +		} +		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) +			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; +		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) +			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + +		pipe_idx++; +	} + +	context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; + +	dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + +	if (!pstate_en) +		/* Restore full p-state latency */ +		context->bw_ctx.dml.soc.dram_clock_change_latency_us = +				dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; +} + +bool dml_validate(struct dc *dc, +		struct dc_state *context, +		bool fast_validate) +{ +	bool out = false; + +	BW_VAL_TRACE_SETUP(); + +	int vlevel = 0; +	int pipe_cnt = 0; +	display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state; +	DC_LOGGER_INIT(dc->ctx->logger); + +	BW_VAL_TRACE_COUNT(); + +	out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + +	if (pipe_cnt == 0) +		goto validate_out; + +	if (!out) +		goto validate_fail; + +	BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + +	if (fast_validate) { +		BW_VAL_TRACE_SKIP(fast); +		goto validate_out; +	} + +	dml_calculate_wm_and_dlg(dc, context, 
pipes, pipe_cnt, vlevel); + +	BW_VAL_TRACE_END_WATERMARKS(); + +	goto validate_out; + +validate_fail: +	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", +		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + +	BW_VAL_TRACE_SKIP(fail); +	out = false; + +validate_out: +	BW_VAL_TRACE_FINISH(); + +	return out; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c new file mode 100644 index 000000000000..4ec5310a2962 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c @@ -0,0 +1,284 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifdef DML_WRAPPER_TRANSLATION_ + +static void gfx10array_mode_to_dml_params( +		enum array_mode_values array_mode, +		enum legacy_tiling_compat_level compat_level, +		unsigned int *sw_mode) +{ +	switch (array_mode) { +	case DC_ARRAY_LINEAR_ALLIGNED: +	case DC_ARRAY_LINEAR_GENERAL: +		*sw_mode = dm_sw_linear; +		break; +	case DC_ARRAY_2D_TILED_THIN1: +// DC_LEGACY_TILING_ADDR_GEN_ZERO - undefined as per current code hence removed +#if 0 +		if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO) +			*sw_mode = dm_sw_gfx7_2d_thin_l_vp; +		else +			*sw_mode = dm_sw_gfx7_2d_thin_gl; +#endif +		break; +	default: +		ASSERT(0); /* Not supported */ +		break; +	} +} + +static void swizzle_to_dml_params( +		enum swizzle_mode_values swizzle, +		unsigned int *sw_mode) +{ +	switch (swizzle) { +	case DC_SW_LINEAR: +		*sw_mode = dm_sw_linear; +		break; +	case DC_SW_4KB_S: +		*sw_mode = dm_sw_4kb_s; +		break; +	case DC_SW_4KB_S_X: +		*sw_mode = dm_sw_4kb_s_x; +		break; +	case DC_SW_4KB_D: +		*sw_mode = dm_sw_4kb_d; +		break; +	case DC_SW_4KB_D_X: +		*sw_mode = dm_sw_4kb_d_x; +		break; +	case DC_SW_64KB_S: +		*sw_mode = dm_sw_64kb_s; +		break; +	case DC_SW_64KB_S_X: +		*sw_mode = dm_sw_64kb_s_x; +		break; +	case DC_SW_64KB_S_T: +		*sw_mode = dm_sw_64kb_s_t; +		break; +	case DC_SW_64KB_D: +		*sw_mode = dm_sw_64kb_d; +		break; +	case DC_SW_64KB_D_X: +		*sw_mode = dm_sw_64kb_d_x; +		break; +	case DC_SW_64KB_D_T: +		*sw_mode = dm_sw_64kb_d_t; +		break; +	case DC_SW_64KB_R_X: +		*sw_mode = dm_sw_64kb_r_x; +		break; +	case DC_SW_VAR_S: +		*sw_mode = dm_sw_var_s; +		break; +	case DC_SW_VAR_S_X: +		*sw_mode = dm_sw_var_s_x; +		break; +	case DC_SW_VAR_D: +		*sw_mode = dm_sw_var_d; +		break; +	case DC_SW_VAR_D_X: +		*sw_mode = dm_sw_var_d_x; +		break; + +	default: +		ASSERT(0); /* Not supported */ +		break; +	} +} + +static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_dest_params_st *dest) +{ +	dest->hblank_start = timing->h_total - timing->h_front_porch; +	dest->hblank_end = dest->hblank_start +			- timing->h_addressable +			- timing->h_border_left +			- timing->h_border_right; +	dest->vblank_start = timing->v_total - timing->v_front_porch; +	dest->vblank_end = dest->vblank_start +			- timing->v_addressable +			- timing->v_border_top +			- timing->v_border_bottom; +	dest->htotal = timing->h_total; +	dest->vtotal = timing->v_total; +	dest->hactive = timing->h_addressable; +	dest->vactive = timing->v_addressable; +	dest->interlaced = timing->flags.INTERLACE; +	dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0; +	if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) +		dest->pixel_rate_mhz *= 2; +} + +static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe) +{ +	int odm_split_count = 0; +	enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled; +	struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + +	// Traverse pipe tree to determine odm split count +	while (next_pipe) { +		odm_split_count++; +		next_pipe = next_pipe->next_odm_pipe; +	} +	pipe = pipe->prev_odm_pipe; +	while (pipe) { +		odm_split_count++; +		pipe = pipe->prev_odm_pipe; +	} + +	// Translate split to DML odm combine factor +	switch (odm_split_count) { +	case 1: +		combine_mode = dm_odm_combine_mode_2to1; +		break; +	case 3: +		combine_mode = dm_odm_combine_mode_4to1; +		break; +	default: +		combine_mode = dm_odm_combine_mode_disabled; +	} + +	return combine_mode; +} + +static int get_dml_output_type(enum signal_type dc_signal) +{ 
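/*
 * Added summary: DP and DP-MST map to dm_dp, eDP to dm_edp, and
 * HDMI/DVI to dm_hdmi; any other signal returns -1, which the pipe
 * population code above treats as "substitute dm_dp and mark the
 * output as virtual".
 */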
+	int dml_output_type = -1; + +	switch (dc_signal) { +	case SIGNAL_TYPE_DISPLAY_PORT_MST: +	case SIGNAL_TYPE_DISPLAY_PORT: +		dml_output_type = dm_dp; +		break; +	case SIGNAL_TYPE_EDP: +		dml_output_type = dm_edp; +		break; +	case SIGNAL_TYPE_HDMI_TYPE_A: +	case SIGNAL_TYPE_DVI_SINGLE_LINK: +	case SIGNAL_TYPE_DVI_DUAL_LINK: +		dml_output_type = dm_hdmi; +		break; +	default: +		break; +	} + +	return dml_output_type; +} + +static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout) +{ +	int output_bpc = 0; + +	switch (timing->display_color_depth) { +	case COLOR_DEPTH_666: +		output_bpc = 6; +		break; +	case COLOR_DEPTH_888: +		output_bpc = 8; +		break; +	case COLOR_DEPTH_101010: +		output_bpc = 10; +		break; +	case COLOR_DEPTH_121212: +		output_bpc = 12; +		break; +	case COLOR_DEPTH_141414: +		output_bpc = 14; +		break; +	case COLOR_DEPTH_161616: +		output_bpc = 16; +		break; +	case COLOR_DEPTH_999: +		output_bpc = 9; +		break; +	case COLOR_DEPTH_111111: +		output_bpc = 11; +		break; +	default: +		output_bpc = 8; +		break; +	} + +	switch (timing->pixel_encoding) { +	case PIXEL_ENCODING_RGB: +	case PIXEL_ENCODING_YCBCR444: +		dout->output_format = dm_444; +		dout->output_bpp = output_bpc * 3; +		break; +	case PIXEL_ENCODING_YCBCR420: +		dout->output_format = dm_420; +		dout->output_bpp = (output_bpc * 3.0) / 2; +		break; +	case PIXEL_ENCODING_YCBCR422: +		if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple) +			dout->output_format = dm_n422; +		else +			dout->output_format = dm_s422; +		dout->output_bpp = output_bpc * 2; +		break; +	default: +		dout->output_format = dm_444; +		dout->output_bpp = output_bpc * 3; +	} +} + +static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format) +{ +	enum source_format_class dml_format = dm_444_32; + +	switch (dc_format) { +	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: +	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: +		dml_format = dm_420_8; +		break; +	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: +	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: +		dml_format = dm_420_10; +		break; +	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: +	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: +	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: +		dml_format = dm_444_64; +		break; +	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: +	case SURFACE_PIXEL_FORMAT_GRPH_RGB565: +		dml_format = dm_444_16; +		break; +	case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: +		dml_format = dm_444_8; +		break; +	case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: +		dml_format = dm_rgbe_alpha; +		break; +	default: +		dml_format = dm_444_32; +		break; +	} + +	return dml_format; +} + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c index 3ee858f311d1..ec636d06e18c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -61,16 +61,6 @@ static double dsc_roundf(double num)  	return (int)(num);  } -static double dsc_ceil(double num) -{ -	double retval = (int)num; - -	if (retval != num && num > 0) -		retval = num + 1; - -	return (int)retval; -} -  static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,  		       enum max_min max_min, float bpp)  { @@ -103,7 +93,7 @@ static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,  		TABLE_CASE(420, 12, min);  	} -	if (table == 0) +	if (!table)  		
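/*
 * Added note: reaching this early return means no QP table matched the
 * requested (colour mode, bpc) pair, so the qp set is left unpopulated;
 * the rewrite from "table == 0" to "!table" above is purely stylistic.
 */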
return;  	index = (bpp - table[0].bpp) * 2; @@ -268,24 +258,3 @@ void _do_calc_rc_params(struct rc_params *rc,  	rc->rc_buf_thresh[13] = 8064;  } -u32 _do_bytes_per_pixel_calc(int slice_width, -		u16 drm_bpp, -		bool is_navite_422_or_420) -{ -	float bpp; -	u32 bytes_per_pixel; -	double d_bytes_per_pixel; - -	dc_assert_fp_enabled(); - -	bpp = ((float)drm_bpp / 16.0); -	d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; -	// TODO: Make sure the formula for calculating this is precise (ceiling -	// vs. floor, and at what point they should be applied) -	if (is_navite_422_or_420) -		d_bytes_per_pixel /= 2; - -	bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); - -	return bytes_per_pixel; -} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h index b93b95409fbe..cad244c023cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h @@ -78,10 +78,6 @@ struct qp_entry {  typedef struct qp_entry qp_table[]; -u32 _do_bytes_per_pixel_calc(int slice_width, -		u16 drm_bpp, -		bool is_navite_422_or_420); -  void _do_calc_rc_params(struct rc_params *rc,  		enum colour_mode cm,  		enum bits_per_comp bpc, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 0321b4446e05..9c74564cbd8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -455,6 +455,7 @@ static bool intersect_dsc_caps(  	if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420)  		dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8); +	dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel;  	dsc_common_caps->is_dp = dsc_sink_caps->is_dp;  	return true;  } @@ -513,6 +514,13 @@ static bool decide_dsc_bandwidth_range(  			range->min_target_bpp_x16 = preferred_bpp_x16;  		}  	} +	/* TODO - make this value generic to all signal types */ +	else if (dsc_caps->edp_sink_max_bits_per_pixel) { +		/* apply max bpp limitation from edp sink */ +		range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel, +				max_bpp_x16); +		range->min_target_bpp_x16 = min_bpp_x16; +	}  	else {  		range->max_target_bpp_x16 = max_bpp_x16;  		range->min_target_bpp_x16 = min_bpp_x16; @@ -574,7 +582,7 @@ static bool decide_dsc_target_bpp_x16(  	return *target_bpp_x16 != 0;  } -#define MIN_AVAILABLE_SLICES_SIZE  4 +#define MIN_AVAILABLE_SLICES_SIZE  6  static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices)  { @@ -860,6 +868,10 @@ static bool setup_dsc_config(  		min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?  	
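/*
 * Added note: together with the new guard a few lines below, a
 * configuration where neither a minimum nor a maximum slice count could
 * be derived (min_slices_h == max_slices_h == 0) is now rejected
 * explicitly instead of passing this "min <= max" test as 0 <= 0.
 */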
is_dsc_possible = (min_slices_h <= max_slices_h); + +	if (min_slices_h == 0 && max_slices_h == 0) +		is_dsc_possible = false; +  	if (!is_dsc_possible)  		goto done; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index b19d3aeb5962..e97cf09be9d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -60,31 +60,3 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)  			   pps->dsc_version_minor);  	DC_FP_END();  } - -/** - * calc_dsc_bytes_per_pixel - calculate bytes per pixel - * @pps: DRM struct with all required DSC values - * - * Based on the information inside drm_dsc_config, this function calculates the - * total of bytes per pixel. - * - * @note This calculation requires float point operation, most of it executes - * under kernel_fpu_{begin,end}. - * - * Return: - * Return the number of bytes per pixel - */ -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps) - -{ -	u32 ret; -	u16 drm_bpp = pps->bits_per_pixel; -	int slice_width  = pps->slice_width; -	bool is_navite_422_or_420 = pps->native_422 || pps->native_420; - -	DC_FP_START(); -	ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp, -				       is_navite_422_or_420); -	DC_FP_END(); -	return ret; -} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index c2340e001b57..80921c1c0d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -30,7 +30,6 @@  #include "dml/dsc/rc_calc_fpu.h"  void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);  #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 1e19dd674e5a..7e306aa3e2b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -100,8 +100,7 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par  	int              ret;  	struct rc_params rc;  	struct drm_dsc_config   dsc_cfg; - -	dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps); +	unsigned long long tmp;  	calc_rc_params(&rc, pps);  	dsc_params->pps = *pps; @@ -113,6 +112,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par  	dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 
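/*
 * Added note: per the DSC spec the substream mux word size is 48 bits
 * for 8/10 bpc and 64 bits for 12 bpc. The do_div() sequence below
 * computes bytes_per_pixel = ceil(slice_chunk_size * 2^28 / slice_width)
 * entirely in integer math (0x10000000 = 2^28 fractional bits),
 * replacing the FPU-based calc_dsc_bytes_per_pixel() that this patch
 * removes.
 */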
48 : 64;  	ret = drm_dsc_compute_rc_parameters(&dsc_cfg); +	tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1); +	do_div(tmp, (uint32_t)dsc_cfg.slice_width);  //ROUND-UP +	dsc_params->bytes_per_pixel = (uint32_t)tmp;  	copy_pps_fields(&dsc_params->pps, &dsc_cfg);  	dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index d34b0b0eea65..444182a97e6e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -53,6 +53,8 @@ enum dc_status {  	DC_NOT_SUPPORTED = 24,  	DC_UNSUPPORTED_VALUE = 25, +	DC_NO_LINK_ENC_RESOURCE = 26, +  	DC_ERROR_UNEXPECTED = -1  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 6fc6488c54c0..890280026e69 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -334,6 +334,20 @@ struct plane_resource {  	struct dcn_fe_bandwidth bw;  }; +#if defined(CONFIG_DRM_AMD_DC_DCN) +#define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF +#define LINK_RES_HPO_DP_REC_MAP__SHIFT 0 +#endif + +/* all mappable hardware resources used to enable a link */ +struct link_resource { +#if defined(CONFIG_DRM_AMD_DC_DCN) +	struct hpo_dp_link_encoder *hpo_dp_link_enc; +#else +	void *dummy; +#endif +}; +  union pipe_update_flags {  	struct {  		uint32_t enable : 1; @@ -361,6 +375,7 @@ struct pipe_ctx {  	struct plane_resource plane_res;  	struct stream_resource stream_res; +	struct link_resource link_res;  	struct clock_source *clock_source; @@ -411,6 +426,8 @@ struct resource_context {  	struct link_enc_cfg_context link_enc_cfg_ctx;  #if defined(CONFIG_DRM_AMD_DC_DCN)  	bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; +	unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; +	int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS];  #endif  #if defined(CONFIG_DRM_AMD_DC_DCN)  	bool is_mpc_3dlut_acquired[MAX_PIPES]; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index a6d3d859754a..cd52813a8432 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -56,16 +56,19 @@ enum {  bool dp_verify_link_cap(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct dc_link_settings *known_limit_link_setting,  	int *fail_count);  bool dp_verify_link_cap_with_retries(  	struct dc_link *link, +	const struct link_resource *link_res,  	struct dc_link_settings *known_limit_link_setting,  	int attempts);  bool dp_verify_mst_link_cap( -	struct dc_link *link); +	struct dc_link *link, +	const struct link_resource *link_res);  bool dp_validate_mode_timing(  	struct dc_link *link, @@ -168,8 +171,9 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(  	struct dc_link *link,  	enum dc_dp_training_pattern pattern); -enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready); +enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready);  void dp_set_fec_enable(struct dc_link *link, bool enable); +struct link_encoder *dp_get_link_enc(struct dc_link *link);  bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);  bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);  void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); @@ 
-210,11 +214,16 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);  struct fixed31_32 calculate_sst_avg_time_slots_per_mtp( 		const struct dc_stream_state *stream, 		const struct dc_link *link); -void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings); -void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal); +void enable_dp_hpo_output(struct dc_link *link, +		const struct link_resource *link_res, +		const struct dc_link_settings *link_settings); +void disable_dp_hpo_output(struct dc_link *link, +		const struct link_resource *link_res, +		enum signal_type signal);  void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);  bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);  void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link);  bool dp_retrieve_lttpr_cap(struct dc_link *link); +void edp_panel_backlight_power_on(struct dc_link *link);  #endif /* __DC_LINK_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h index 974d703e3771..74dafd0f9d3d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h @@ -91,8 +91,9 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);   * DPIA equivalent of dc_link_dp_perform_link_training.   * Aborts link training upon detection of sink unplug.   */ -enum link_training_result -dc_link_dpia_perform_link_training(struct dc_link *link, +enum link_training_result dc_link_dpia_perform_link_training( +	struct dc_link *link, +	const struct link_resource *link_res,  const struct dc_link_settings *link_setting,  bool skip_video_pattern); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 806f3041db14..337c0161e72d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -619,7 +619,7 @@ struct dcn_ip_params {  };  extern const struct dcn_ip_params dcn10_ip_defaults; -bool dcn_validate_bandwidth( +bool dcn10_validate_bandwidth( 		struct dc *dc, 		struct dc_state *context, 		bool fast_validate); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h new file mode 100644 index 000000000000..5dcfbd8e2697 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h @@ -0,0 +1,34 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DML_WRAPPER_H_ +#define DML_WRAPPER_H_ + +#include "dc.h" +#include "dml/display_mode_vba.h" + +bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index a17e5de3b100..c920c4b6077d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -211,6 +211,8 @@ struct dummy_pstate_entry {  struct clk_bw_params {  	unsigned int vram_type;  	unsigned int num_channels; + 	unsigned int dispclk_vco_khz; +	unsigned int dc_mode_softmax_memclk;  	struct clk_limit_table clk_table;  	struct wm_table wm_table;  	struct dummy_pstate_entry dummy_pstate_table[4]; @@ -261,6 +263,10 @@ struct clk_mgr_funcs {  	/* Send message to PMFW to set hard max memclk frequency to highest DPM */  	void (*set_hard_max_memclk)(struct clk_mgr *clk_mgr); +	/* Custom set a memclk freq range*/ +	void (*set_max_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz); +	void (*set_min_memclk)(struct clk_mgr *clk_mgr, unsigned int memclk_mhz); +  	/* Get current memclk states from PMFW, update relevant structures */  	void (*get_memclk_states_from_smu)(struct clk_mgr *clk_mgr); @@ -274,6 +280,7 @@ struct clk_mgr {  	struct dc_clocks clks;  	bool psr_allow_active_cache;  	bool force_smu_not_present; +	bool dc_mode_softmax_enabled;  	int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes  	int dentist_vco_freq_khz;  	struct clk_state_registers_and_bypass boot_snapshot; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index f94135c6e3c2..346f0ba73e86 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -61,6 +61,8 @@ struct dcn_dsc_state {  	uint32_t dsc_pic_height;  	uint32_t dsc_slice_bpg_offset;  	uint32_t dsc_chunk_size; +	uint32_t dsc_fw_en; +	uint32_t dsc_opp_source;  }; @@ -88,6 +90,7 @@ struct dsc_enc_caps {  	int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */  	int32_t max_slice_width;  	uint32_t bpp_increment_div; /* bpp increment divisor, e.g. 
if 16, it's 1/16th of a bit */ +	uint32_t edp_sink_max_bits_per_pixel;  	bool is_dp;  }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 80e1a32bc63d..2c031586f4e6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -139,6 +139,7 @@ struct hubp_funcs {  	bool (*hubp_is_flip_pending)(struct hubp *hubp);  	void (*set_blank)(struct hubp *hubp, bool blank); +	void (*set_blank_regs)(struct hubp *hubp, bool blank);  	void (*set_hubp_blank_en)(struct hubp *hubp, bool blank);  	void (*set_cursor_attributes)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index bb0e91756ddd..2ce15cd10d80 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -268,7 +268,8 @@ struct hpo_dp_link_encoder_funcs {  	void (*enable_link_phy)(struct hpo_dp_link_encoder *enc,  		const struct dc_link_settings *link_settings, -		enum transmitter transmitter); +		enum transmitter transmitter, +		enum hpd_source_id hpd_source);  	void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc,  		enum signal_type signal); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index c88e113b94d1..073f8b667eff 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -164,6 +164,10 @@ struct stream_encoder_funcs {  	void (*stop_dp_info_packets)(  		struct stream_encoder *enc); +	void (*reset_fifo)( +		struct stream_encoder *enc +	); +  	void (*dp_blank)(  		struct dc_link *link,  		struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 7390baf916b5..c29320b3855d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -290,6 +290,8 @@ struct timing_generator_funcs {  			       enum optc_dsc_mode dsc_mode,  			       uint32_t dsc_bytes_per_pixel,  			       uint32_t dsc_slice_width); +	void (*get_dsc_status)(struct timing_generator *optc, +					uint32_t *dsc_mode);  	void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing);  	void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt,  			struct dc_crtc_timing *timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index d50f4bd06b5d..05053f3b4ab7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -64,6 +64,7 @@ struct hw_sequencer_funcs {  	enum dc_status (*apply_ctx_to_hw)(struct dc *dc,  			struct dc_state *context);  	void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx); +	void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);  	void (*apply_ctx_for_surface)(struct dc *dc,  			const struct dc_stream_state *stream,  			int num_planes, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index 10dcf6a5e9b1..a4e43b4826e0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -36,7 +36,7 @@   * 
Initialise link encoder resource tracking.   */  void link_enc_cfg_init( -		struct dc *dc, +		const struct dc *dc,  		struct dc_state *state);  /* diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index ba664bc49595..69d63763a10e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -32,6 +32,7 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,  void dp_enable_link_phy(  	struct dc_link *link, +	const struct link_resource *link_res,  	enum signal_type signal,  	enum clock_source_id clock_source,  	const struct dc_link_settings *link_settings); @@ -42,22 +43,27 @@ void edp_add_delay_for_T9(struct dc_link *link);  bool edp_receiver_ready_T9(struct dc_link *link);  bool edp_receiver_ready_T7(struct dc_link *link); -void dp_disable_link_phy(struct dc_link *link, enum signal_type signal); +void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res, +		enum signal_type signal); -void dp_disable_link_phy_mst(struct dc_link *link, enum signal_type signal); +void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res, +		enum signal_type signal);  bool dp_set_hw_training_pattern(  	struct dc_link *link, +	const struct link_resource *link_res,  	enum dc_dp_training_pattern pattern,  	uint32_t offset);  void dp_set_hw_lane_settings(  	struct dc_link *link, +	const struct link_resource *link_res,  	const struct link_training_settings *link_settings,  	uint32_t offset);  void dp_set_hw_test_pattern(  	struct dc_link *link, +	const struct link_resource *link_res,  	enum dp_test_pattern test_pattern,  	uint8_t *custom_pattern,  	uint32_t custom_pattern_size); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 372c0898facd..4249bf306e09 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -202,8 +202,12 @@ int get_num_mpc_splits(struct pipe_ctx *pipe);  int get_num_odm_splits(struct pipe_ctx *pipe);  #if defined(CONFIG_DRM_AMD_DC_DCN) -struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder( -		const struct resource_pool *pool); +struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( +		const struct resource_context *res_ctx, +		const struct resource_pool *pool, +		const struct dc_link *link);  #endif +uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter); +  #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c index 378cc11aa047..6b5fedd9ace0 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c @@ -185,16 +185,18 @@ bool dal_irq_service_dummy_set(struct irq_service *irq_service,  			       const struct irq_source_info *info,  			       bool enable)  { -	DC_LOG_ERROR("%s: called for non-implemented irq source\n", -		     __func__); +	DC_LOG_ERROR("%s: called for non-implemented irq source, src_id=%u, ext_id=%u\n", +		     __func__, info->src_id, info->ext_id); +  	return false;  }  bool dal_irq_service_dummy_ack(struct irq_service *irq_service,  			       const struct irq_source_info *info)  { -	DC_LOG_ERROR("%s: called for non-implemented irq source\n", -		     __func__); +	DC_LOG_ERROR("%s: called for non-implemented 
irq source, src_id=%u, ext_id=%u\n", +		     __func__, info->src_id, info->ext_id); +  	return false;  } diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c index 34f43cb650f8..cf072e2347d3 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c @@ -40,10 +40,9 @@  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn10( -		struct irq_service *irq_service, -		uint32_t src_id, -		uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn10(struct irq_service *irq_service, +						  uint32_t src_id, +						  uint32_t ext_id)  {  	switch (src_id) {  	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 9ccafe007b23..c4b067d01895 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -132,31 +132,6 @@ enum dc_irq_source to_dal_irq_source_dcn20(  	}  } -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source) -{ -	const struct irq_source_info *info; -	uint32_t addr; -	uint32_t value; -	uint32_t current_status; - -	info = find_irq_source_info(irq_service, source); -	if (!info) -		return 0; - -	addr = info->status_reg; -	if (!addr) -		return 0; - -	value = dm_read_reg(irq_service->ctx, addr); -	current_status = -		get_reg_field_value( -			value, -			HPD0_DC_HPD_INT_STATUS, -			DC_HPD_SENSE); - -	return current_status; -} -  static bool hpd_ack(  	struct irq_service *irq_service,  	const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h index 4d69ab24ca25..aee4b37999f1 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h @@ -31,6 +31,4 @@  struct irq_service *dal_irq_service_dcn20_create(  	struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source); -  #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c index a47f68634fc3..aa708b61142f 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c @@ -39,10 +39,9 @@  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn201( -		struct irq_service *irq_service, -		uint32_t src_id, -		uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn201(struct irq_service *irq_service, +						   uint32_t src_id, +						   uint32_t ext_id)  {  	switch (src_id) {  	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 78940cb20e10..0f15bcada4e9 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -40,10 +40,9 @@  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn21( -		struct irq_service *irq_service, -		uint32_t src_id, -		uint32_t ext_id) +static enum dc_irq_source 
to_dal_irq_source_dcn21(struct irq_service *irq_service, +						  uint32_t src_id, +						  uint32_t ext_id)  {  	switch (src_id) {  	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: @@ -135,31 +134,6 @@ enum dc_irq_source to_dal_irq_source_dcn21(  	return DC_IRQ_SOURCE_INVALID;  } -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source) -{ -	const struct irq_source_info *info; -	uint32_t addr; -	uint32_t value; -	uint32_t current_status; - -	info = find_irq_source_info(irq_service, source); -	if (!info) -		return 0; - -	addr = info->status_reg; -	if (!addr) -		return 0; - -	value = dm_read_reg(irq_service->ctx, addr); -	current_status = -		get_reg_field_value( -			value, -			HPD0_DC_HPD_INT_STATUS, -			DC_HPD_SENSE); - -	return current_status; -} -  static bool hpd_ack(  	struct irq_service *irq_service,  	const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h index 616470e32380..da2bd0e93d7a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h @@ -31,6 +31,4 @@  struct irq_service *dal_irq_service_dcn21_create(  	struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source); -  #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c index 38e0ade60c7b..1b88e4e627fd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c @@ -36,10 +36,9 @@  #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" -enum dc_irq_source to_dal_irq_source_dcn31( -		struct irq_service *irq_service, -		uint32_t src_id, -		uint32_t ext_id) +static enum dc_irq_source to_dal_irq_source_dcn31(struct irq_service *irq_service, +						  uint32_t src_id, +						  uint32_t ext_id)  {  	switch (src_id) {  	case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP: diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 4db1133e4466..a2a4fbeb83f8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)  	*irq_service = NULL;  } -const struct irq_source_info *find_irq_source_info( +static const struct irq_source_info *find_irq_source_info(  	struct irq_service *irq_service,  	enum dc_irq_source source)  { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index e60b82480093..dbfcb096eedd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -69,10 +69,6 @@ struct irq_service {  	const struct irq_service_funcs *funcs;  }; -const struct irq_source_info *find_irq_source_info( -	struct irq_service *irq_service, -	enum dc_irq_source source); -  void dal_irq_service_construct(  	struct irq_service *irq_service,  	struct irq_service_init_data *init_data); diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index cd204eef073b..83855b8a32e9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -360,6 +360,8 @@ struct dmub_srv_hw_funcs {  	uint32_t 
(*get_gpint_dataout)(struct dmub_srv *dmub);
 
+	void (*clear_inbox0_ack_register)(struct dmub_srv *dmub);
+	uint32_t (*read_inbox0_ack_register)(struct dmub_srv *dmub);
 	void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
 
 	uint32_t (*get_current_time)(struct dmub_srv *dmub);
@@ -409,6 +411,7 @@ struct dmub_srv {
 	struct dmub_srv_base_funcs funcs;
 	struct dmub_srv_hw_funcs hw_funcs;
 	struct dmub_rb inbox1_rb;
+	uint32_t inbox1_last_wptr;
 	/**
 	 * outbox1_rb is accessed without locks (dal & dc)
 	 * and to be used only in dmub_srv_stat_get_notification()
@@ -735,6 +738,45 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_
 
 bool dmub_srv_should_detect(struct dmub_srv *dmub);
 
+/**
+ * dmub_srv_send_inbox0_cmd() - Send command to DMUB using INBOX0
+ * @dmub: the dmub service
+ * @data: the data to be sent in the INBOX0 command
+ *
+ * Send command by writing directly to INBOX0 WPTR
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
+
+/**
+ * dmub_srv_wait_for_inbox0_ack() - wait for DMUB to ACK INBOX0 command
+ * @dmub: the dmub service
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Wait for DMUB to ACK the INBOX0 message
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ *   DMUB_STATUS_TIMEOUT - wait for ack timed out
+ */
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us);
+
+/**
+ * dmub_srv_clear_inbox0_ack() - clear ACK register for INBOX0
+ * @dmub: the dmub service
+ *
+ * Clear ACK register for INBOX0
+ *
+ * Return:
+ *   DMUB_STATUS_OK - success
+ *   DMUB_STATUS_INVALID - hw_init false or hw function does not exist
+ */
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub);
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c29a67ccef17..873ecd04e01d 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -46,10 +46,10 @@
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e
+#define DMUB_FW_VERSION_GIT_HASH 0xbaf06b95
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 91
+#define DMUB_FW_VERSION_REVISION 98
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -173,13 +173,6 @@ extern "C" {
 #endif
 
 /**
- * Number of nanoseconds per DMUB tick.
- * DMCUB_TIMER_CURRENT increments in DMUB ticks, which are 10ns by default.
- * If DMCUB_TIMER_WINDOW is non-zero this will no longer be true.
- */
-#define NS_PER_DMUB_TICK 10
-
-/**
  * union dmub_addr - DMUB physical/virtual 64-bit address.
  */
 union dmub_addr {
@@ -208,10 +201,9 @@ union dmub_psr_debug_flags {
 		uint32_t use_hw_lock_mgr : 1;
 
 		/**
-		 * Unused.
-		 * TODO: Remove.
+		 * Use TPS3 signal when restoring the main link.
 		 */
-		uint32_t log_line_nums : 1;
+		uint32_t force_wakeup_by_tps3 : 1;
 	} bitfields;
 
 	/**
@@ -416,7 +408,14 @@ enum dmub_cmd_vbios_type {
 	 * Enables or disables power gating.
 	 */
 	DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING = 3,
+	/**
+	 * Controls embedded panels.
+	 */
 	DMUB_CMD__VBIOS_LVTMA_CONTROL = 15,
+	/**
+	 * Query DP alt status on a transmitter.
+	 */
+	DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
 };
 
 //==============================================================================
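The three INBOX0 helpers documented above form a clear/send/wait protocol. A minimal sketch of the intended calling sequence, not part of the patch; the wrapper name and the 100 us timeout are illustrative only:

static enum dmub_status send_inbox0_message(struct dmub_srv *dmub,
					    union dmub_inbox0_data_register data)
{
	enum dmub_status status;

	/* Drop any ACK left over from a previous INBOX0 command. */
	status = dmub_srv_clear_inbox0_ack(dmub);
	if (status != DMUB_STATUS_OK)
		return status;

	/* Write the command directly to the INBOX0 WPTR register. */
	status = dmub_srv_send_inbox0_cmd(dmub, data);
	if (status != DMUB_STATUS_OK)
		return status;

	/* Poll for the firmware ACK, giving up after 100 us. */
	return dmub_srv_wait_for_inbox0_ack(dmub, 100);
}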
@@ -1550,10 +1549,14 @@ struct dmub_cmd_psr_copy_settings_data {
 	 * Currently the support is only for 0 or 1
 	 */
 	uint8_t panel_inst;
+	/**
+	 * DSC enable status in driver
+	 */
+	uint8_t dsc_enable_status;
 	/**
-	 * Explicit padding to 4 byte boundary.
+	 * Explicit padding (3 bytes) to 4 byte boundary.
 	 */
-	uint8_t pad3[4];
+	uint8_t pad3[3];
 };
 
 /**
@@ -2398,6 +2401,24 @@ struct dmub_rb_cmd_lvtma_control {
 };
 
 /**
+ * Data passed in/out in a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+struct dmub_rb_cmd_transmitter_query_dp_alt_data {
+	uint8_t phy_id; /**< 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4=UNIPHYE, 5=UNIPHYF */
+	uint8_t is_usb; /**< whether the PHY is in USB mode */
+	uint8_t is_dp_alt_disable; /**< whether DP alt mode is disabled */
+	uint8_t is_dp4; /**< whether DP is configured for 4 lanes */
+};
+
+/**
+ * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+ */
+struct dmub_rb_cmd_transmitter_query_dp_alt {
+	struct dmub_cmd_header header; /**< header */
+	struct dmub_rb_cmd_transmitter_query_dp_alt_data data; /**< payload */
+};
+
+/**
  * Maximum number of bytes a chunk sent to DMUB for parsing
  */
 #define DMUB_EDID_CEA_DATA_CHUNK_BYTES 8
@@ -2408,7 +2429,7 @@ struct dmub_rb_cmd_lvtma_control {
 struct dmub_cmd_send_edid_cea {
 	uint16_t offset;	/**< offset into the CEA block */
 	uint8_t length;	/**< number of bytes in payload to copy as part of CEA block */
-	uint16_t total_length;  /**< total length of the CEA block */
+	uint16_t cea_total_length;  /**< total length of the CEA block */
 	uint8_t payload[DMUB_EDID_CEA_DATA_CHUNK_BYTES]; /**< data chunk of the CEA block */
 	uint8_t pad[3]; /**< padding and for future expansion */
 };
@@ -2605,6 +2626,10 @@ union dmub_rb_cmd {
 	 */
 	struct dmub_rb_cmd_lvtma_control lvtma_control;
 	/**
+	 * Definition of a DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command.
+	 */
+	struct dmub_rb_cmd_transmitter_query_dp_alt query_dp_alt;
+	/**
 	 * Definition of a DMUB_CMD__DPIA_DIG1_CONTROL command.
*/  	struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control; @@ -2722,7 +2747,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)  static inline bool dmub_rb_push_front(struct dmub_rb *rb,  				      const union dmub_rb_cmd *cmd)  { -	uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t); +	uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt);  	const uint64_t *src = (const uint64_t *)cmd;  	uint8_t i; @@ -2840,7 +2865,7 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,  static inline bool dmub_rb_out_front(struct dmub_rb *rb,  				 union dmub_rb_out_cmd *cmd)  { -	const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t); +	const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr);  	uint64_t *dst = (uint64_t *)cmd;  	uint8_t i; @@ -2888,7 +2913,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)  	uint32_t wptr = rb->wrpt;  	while (rptr != wptr) { -		uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t); +		uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr);  		//uint64_t volatile *p = (uint64_t volatile *)data;  		uint64_t temp;  		uint8_t i; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 56d400ffa7ac..9280f2abd973 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -100,24 +100,9 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb)  }  static const struct dmub_fw_meta_info * -dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset)  {  	const union dmub_fw_meta *meta; -	const uint8_t *blob = NULL; -	uint32_t blob_size = 0; -	uint32_t meta_offset = 0; - -	if (params->fw_bss_data && params->bss_data_size) { -		/* Legacy metadata region. */ -		blob = params->fw_bss_data; -		blob_size = params->bss_data_size; -		meta_offset = DMUB_FW_META_OFFSET; -	} else if (params->fw_inst_const && params->inst_const_size) { -		/* Combined metadata region. */ -		blob = params->fw_inst_const; -		blob_size = params->inst_const_size; -		meta_offset = 0; -	}  	if (!blob || !blob_size)  		return NULL; @@ -134,6 +119,32 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)  	return &meta->info;  } +static const struct dmub_fw_meta_info * +dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +{ +	const struct dmub_fw_meta_info *info = NULL; + +	if (params->fw_bss_data && params->bss_data_size) { +		/* Legacy metadata region. */ +		info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data, +						       params->bss_data_size, +						       DMUB_FW_META_OFFSET); +	} else if (params->fw_inst_const && params->inst_const_size) { +		/* Combined metadata region - can be aligned to 16-bytes. 
*/
+		uint32_t i;
+
+		for (i = 0; i < 16; ++i) {
+			info = dmub_get_fw_meta_info_from_blob(
+				params->fw_inst_const, params->inst_const_size, i);
+
+			if (info)
+				break;
+		}
+	}
+
+	return info;
+}
+
 static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
 {
 	struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs;
@@ -598,6 +609,8 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
 
 enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
 {
+	struct dmub_rb flush_rb;
+
 	if (!dmub->hw_init)
 		return DMUB_STATUS_INVALID;
 
@@ -606,9 +619,14 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub)
 	 * been flushed to framebuffer memory. Otherwise DMCUB might
 	 * read back stale, fully invalid or partially invalid data.
 	 */
-	dmub_rb_flush_pending(&dmub->inbox1_rb);
+	flush_rb = dmub->inbox1_rb;
+	flush_rb.rptr = dmub->inbox1_last_wptr;
+	dmub_rb_flush_pending(&flush_rb);
+
+	dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
 
+	dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
-		dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt);
 
 	return DMUB_STATUS_OK;
 }
@@ -831,3 +849,39 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub)
 
 	return dmub->hw_funcs.should_detect(dmub);
 }
+
+enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub)
+{
+	if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register)
+		return DMUB_STATUS_INVALID;
+
+	dmub->hw_funcs.clear_inbox0_ack_register(dmub);
+	return DMUB_STATUS_OK;
+}
+
+enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us)
+{
+	uint32_t i = 0;
+	uint32_t ack = 0;
+
+	if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register)
+		return DMUB_STATUS_INVALID;
+
+	for (i = 0; i <= timeout_us; i++) {
+		ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
+		if (ack)
+			return DMUB_STATUS_OK;
+		udelay(1);
+	}
+	return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub,
+		union dmub_inbox0_data_register data)
+{
+	if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd)
+		return DMUB_STATUS_INVALID;
+
+	dmub->hw_funcs.send_inbox0_cmd(dmub, data);
+	return DMUB_STATUS_OK;
+}
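The dmub_rb accessor changes earlier in this patch (dmub_rb_push_front, dmub_rb_out_front, dmub_rb_flush_pending) all swap element-scaled pointer arithmetic for byte-based arithmetic. A minimal standalone sketch of the difference, not part of the patch: the two forms only agree when the byte offset is a multiple of sizeof(uint64_t), an assumption the old code made silently.

#include <stdint.h>

/* rptr/wrpt are byte offsets into the ring buffer. */
static inline volatile uint64_t *rb_slot_scaled(void *base, uint32_t offset)
{
	/* Pointer arithmetic on uint64_t * already multiplies by 8, and
	 * offset / sizeof(uint64_t) truncates: offset 12 yields base + 8. */
	return (volatile uint64_t *)base + offset / sizeof(uint64_t);
}

static inline volatile uint64_t *rb_slot_bytes(void *base, uint32_t offset)
{
	/* Advance by exactly offset bytes, as the patched accessors do:
	 * offset 12 yields base + 12, with no hidden alignment assumption. */
	return (volatile uint64_t *)((uint8_t *)base + offset);
}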
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index 4de59b66bb1a..a2b80514d83e 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -35,6 +35,7 @@
 
 #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
 #define DP_BRANCH_DEVICE_ID_006037 0x006037
+#define DP_DEVICE_ID_38EC11 0x38EC11
 enum ddc_result {
 	DDC_RESULT_UNKNOWN = 0,
 	DDC_RESULT_SUCESSFULL,
@@ -117,4 +118,7 @@ struct av_sync_data {
 	uint8_t aud_del_ins3;/* DPCD 0002Dh */
 };
 
+static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0};
+static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0};
+
 #endif /* __DAL_DDC_SERVICE_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 370fad883e33..f093b49c5e6e 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -72,9 +72,7 @@
 #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
 #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
 #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 #define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__)
-#endif
 
 struct dal_logger;
 
@@ -126,9 +124,7 @@ enum dc_log_type {
 	LOG_MAX_HW_POINTS,
 	LOG_ALL_TF_CHANNELS,
 	LOG_SAMPLE_1DLUT,
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 	LOG_DP2,
-#endif
 	LOG_SECTION_TOTAL_COUNT
 };
 
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index 6d648c889866..f7420c3f5672 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
 	uint8_t rev;
 	uint8_t assr_enabled;
 	uint8_t mst_enabled;
+	uint8_t usb4_enabled;
 };
 
 struct mod_hdcp_hdmi {
@@ -249,7 +250,6 @@ struct mod_hdcp_link {
 	uint8_t ddc_line;
 	uint8_t link_enc_idx;
 	uint8_t phy_idx;
-	uint8_t dio_output_type;
 	uint8_t dio_output_id;
 	uint8_t hdcp_supported_informational;
 	union {
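For reference, a hedged sketch of how a caller might drive the new DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT command defined above. The DMUB_CMD__VBIOS command type and the synchronous dc_dmub_srv_cmd_with_reply_data() helper are assumed from the wider DC tree, not added by this patch, and the wrapper name is hypothetical.

static bool query_dp_alt_mode(struct dc *dc, uint8_t phy_id, bool *is_usb_c)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
	cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
	cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.query_dp_alt.data);
	cmd.query_dp_alt.data.phy_id = phy_id; /* 0 = UNIPHYA ... 5 = UNIPHYF */

	/* The firmware writes its reply back into cmd.query_dp_alt.data. */
	if (!dc_dmub_srv_cmd_with_reply_data(dc->ctx->dmub_srv, &cmd))
		return false;

	*is_usb_c = cmd.query_dp_alt.data.is_usb;
	return true;
}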

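Looping back to the clk_mgr hooks added near the top of this section: a hedged sketch of how the DC-mode memclk soft max might be applied. The wrapper name is hypothetical; the NULL check reflects that set_max_memclk/set_min_memclk are optional per-ASIC hooks, and bw_params is assumed to be populated.

static void apply_memclk_softmax(struct clk_mgr *clk_mgr, bool cap)
{
	struct clk_bw_params *bw_params = clk_mgr->bw_params;
	unsigned int hard_max_mhz;

	if (!clk_mgr->dc_mode_softmax_enabled || !clk_mgr->funcs->set_max_memclk)
		return;

	/* Highest DPM entry in the clock table, used to undo the cap. */
	hard_max_mhz = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1].memclk_mhz;

	/* Cap to the DC-mode soft max, or restore the full DPM range. */
	clk_mgr->funcs->set_max_memclk(clk_mgr,
		cap ? bw_params->dc_mode_softmax_memclk : hard_max_mhz);
}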