author    Daniel Vetter <daniel.vetter@ffwll.ch>    2023-08-04 11:10:18 +0200
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2023-08-04 11:10:18 +0200
commit    3d00c59d147724e536b415e389445ece6fcda42f (patch)
tree      b7abf11faad68372dfd889eb644a825f4cdb19a2 /drivers/gpu/drm/amd/display/amdgpu_dm
parent    52920704df878050123dfeb469aa6ab8022547c1 (diff)
parent    7ea1db28119e237d634c6f74ba52056939c009ad (diff)
Merge tag 'amd-drm-next-6.6-2023-07-28' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.6-2023-07-28:

amdgpu:
- Lots of checkpatch cleanups
- GFX 9.4.3 updates
- Add USB PD and IFWI flashing documentation
- GPUVM updates
- RAS fixes
- DRR fixes
- FAMS fixes
- Virtual display fixes
- Soft IH fixes
- SMU13 fixes
- Rework PSP firmware loading for other IPs
- Kernel doc fixes
- DCN 3.0.1 fixes
- LTTPR fixes
- DP MST fixes
- DCN 3.1.6 fixes
- SubVP fixes
- Display bandwidth calculation fixes
- VCN4 secure submission fixes
- Allow building DC on RISC-V
- Add visible FB info to bo_print_info
- HBR3 fixes
- Add PSP 14.0 support
- GFX9 MCBP fix
- GMC10 vmhub index fix
- GMC11 vmhub index fix
- Create a new doorbell manager
- SR-IOV fixes

amdkfd:
- Cleanup CRIU dma-buf handling
- Use KIQ to unmap HIQ
- GFX 9.4.3 debugger updates
- GFX 9.4.2 debugger fixes
- Enable cooperative groups for gfx11
- SVM fixes

radeon:
- Lots of checkpatch cleanups

Merge conflicts:
- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
  The switch to the drm_exec helpers in 8a206685d36f ("drm/amdgpu: use drm_exec
  for GEM and CSA handling v2") clashed with the cosmetic cleanups from
  30953c4d000b ("drm/amdgpu: Fix style issues in amdgpu_gem.c"). I kept the
  former, since the cleaned-up code is gone.
- drivers/gpu/drm/amd/amdgpu/atom.c
  adf64e214280 ("drm/amd: Avoid reading the VBIOS part number twice") removed
  code that 992b8fe106ab ("drm/radeon: Replace all non-returning strlcpy with
  strscpy") polished.

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230728214228.8102-1-alexander.deucher@amd.com
[sima: some merge conflict wrangling as noted]
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
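(Editorial note on the first conflict above: the drm_exec helpers wrap ww_mutex-protected GEM object locking behind a retry loop. A minimal sketch of that pattern, assuming the 6.6-era two-argument drm_exec_init(); obj and num_fences are hypothetical caller-provided values, not taken from this merge:)

	struct drm_exec exec;
	int ret;

	/* Sketch only: 6.6-era API; drm_exec_init() later grew a third argument. */
	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* Lock the object and reserve fence slots; loop again on contention. */
		ret = drm_exec_prepare_obj(&exec, obj, num_fences);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto out;
	}
	/* ... submit work referencing the locked objects ... */
out:
	drm_exec_fini(&exec);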
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c            456
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h              7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c          6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c        12
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c     14
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c       185
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c     33
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c         42
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  125
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h   11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c       54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c      11
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c          1
13 files changed, 539 insertions, 418 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ff0a217b9d56..d96b6eda3320 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -245,51 +245,52 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
*/
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
+ struct amdgpu_crtc *acrtc = NULL;
+
if (crtc >= adev->mode_info.num_crtc)
return 0;
- else {
- struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- if (acrtc->dm_irq_params.stream == NULL) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
- crtc);
- return 0;
- }
+ acrtc = adev->mode_info.crtcs[crtc];
- return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
}
+
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
u32 v_blank_start, v_blank_end, h_position, v_position;
+ struct amdgpu_crtc *acrtc = NULL;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
- else {
- struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
- if (acrtc->dm_irq_params.stream == NULL) {
- DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
- crtc);
- return 0;
- }
-
- /*
- * TODO rework base driver to use values directly.
- * for now parse it back into reg-format
- */
- dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
- &v_blank_start,
- &v_blank_end,
- &h_position,
- &v_position);
+ acrtc = adev->mode_info.crtcs[crtc];
- *position = v_position | (h_position << 16);
- *vbl = v_blank_start | (v_blank_end << 16);
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
}
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+
return 0;
}
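(Editorial note: the TODO above keeps the legacy packed register format, so a caller unpacks the two 16-bit fields from each u32; a sketch of a hypothetical consumer:)

	u32 position, vbl, v_position, h_position, v_blank_start, v_blank_end;

	if (dm_crtc_get_scanoutpos(adev, crtc, &vbl, &position))
		return;				/* invalid crtc index */

	v_position    = position & 0xffff;	/* current vertical line */
	h_position    = position >> 16;		/* current horizontal pixel */
	v_blank_start = vbl & 0xffff;
	v_blank_end   = vbl >> 16;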
@@ -424,12 +425,12 @@ static void dm_pflip_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
- DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED,
- amdgpu_crtc->crtc_id,
- amdgpu_crtc);
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status,
+ AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id,
+ amdgpu_crtc);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return;
}
@@ -883,7 +884,7 @@ static int dm_set_powergating_state(void *handle,
}
/* Prototypes of private functions */
-static int dm_early_init(void* handle);
+static int dm_early_init(void *handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
@@ -1282,7 +1283,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
- pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
@@ -1347,6 +1348,15 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
if (amdgpu_in_reset(adev))
goto skip;
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
mutex_lock(&adev->dm.dc_lock);
if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
@@ -1365,8 +1375,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
DP_TEST_RESPONSE,
&test_response.raw,
sizeof(test_response));
- }
- else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
/* offload_work->data is from handle_hpd_rx_irq->
@@ -1554,7 +1563,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
- if(amdgpu_dm_irq_init(adev)) {
+ if (amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
@@ -1696,9 +1705,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
adev->dm.dc->debug.disable_dsc = true;
- }
if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
adev->dm.dc->debug.disable_clock_gate = true;
@@ -1942,8 +1950,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
mutex_destroy(&adev->dm.audio_lock);
mutex_destroy(&adev->dm.dc_lock);
mutex_destroy(&adev->dm.dpia_aux_lock);
-
- return;
}
static int load_dmcu_fw(struct amdgpu_device *adev)
@@ -1952,7 +1958,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
int r;
const struct dmcu_firmware_header_v1_0 *hdr;
- switch(adev->asic_type) {
+ switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
case CHIP_TAHITI:
case CHIP_PITCAIRN:
@@ -2709,7 +2715,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
struct dc_scaling_info scaling_infos[MAX_SURFACES];
struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } * bundle;
+ } *bundle;
int k, m;
bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
@@ -2739,8 +2745,6 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
cleanup:
kfree(bundle);
-
- return;
}
static int dm_resume(void *handle)
@@ -2954,8 +2958,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
.set_powergating_state = dm_set_powergating_state,
};
-const struct amdgpu_ip_block_version dm_ip_block =
-{
+const struct amdgpu_ip_block_version dm_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
@@ -3000,9 +3003,12 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- if (caps->ext_caps->bits.oled == 1 /*||
- caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
- caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
+ if (caps->ext_caps->bits.oled == 1
+ /*
+ * ||
+ * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+ */)
caps->aux_support = true;
if (amdgpu_backlight == 0)
@@ -3236,86 +3242,6 @@ static void handle_hpd_irq(void *param)
}
-static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
-{
- u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- u8 dret;
- bool new_irq_handled = false;
- int dpcd_addr;
- int dpcd_bytes_to_read;
-
- const int max_process_count = 30;
- int process_count = 0;
-
- const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
-
- if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
- dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
- /* DPCD 0x200 - 0x201 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT;
- } else {
- dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
- /* DPCD 0x2002 - 0x2005 for downstream IRQ */
- dpcd_addr = DP_SINK_COUNT_ESI;
- }
-
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- while (dret == dpcd_bytes_to_read &&
- process_count < max_process_count) {
- u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
- u8 retry;
- dret = 0;
-
- process_count++;
-
- DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
- /* handle HPD short pulse irq */
- if (aconnector->mst_mgr.mst_state)
- drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
- esi,
- ack,
- &new_irq_handled);
-
- if (new_irq_handled) {
- /* ACK at DPCD to notify down stream */
- for (retry = 0; retry < 3; retry++) {
- ssize_t wret;
-
- wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
- dpcd_addr + 1,
- ack[1]);
- if (wret == 1)
- break;
- }
-
- if (retry == 3) {
- DRM_ERROR("Failed to ack MST event.\n");
- return;
- }
-
- drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
- /* check if there is new irq to be handled */
- dret = drm_dp_dpcd_read(
- &aconnector->dm_dp_aux.aux,
- dpcd_addr,
- esi,
- dpcd_bytes_to_read);
-
- new_irq_handled = false;
- } else {
- break;
- }
- }
-
- if (process_count == max_process_count)
- DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
-}
-
static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
union hpd_irq_data hpd_irq_data)
{
@@ -3377,7 +3303,23 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- dm_handle_mst_sideband_msg(aconnector);
+ bool skip = false;
+
+ /*
+ * DOWN_REP_MSG_RDY is also handled by polling method
+ * mgr->cbs->poll_hpd_irq()
+ */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
goto out;
}
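(Editorial note: together with the dm_handle_hpd_rx_offload_work() hunk earlier in this patch, the new is_handling_mst_msg_rdy_event flag forms a claim-once/schedule-once handoff between the short-pulse IRQ and the offload worker; condensed here for clarity, with names as in the patch and surrounding code elided:)

	/* IRQ side: claim the flag under the lock so only one worker is queued. */
	spin_lock(&offload_wq->offload_lock);
	if (!offload_wq->is_handling_mst_msg_rdy_event) {
		offload_wq->is_handling_mst_msg_rdy_event = true;
		spin_unlock(&offload_wq->offload_lock);
		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
	} else {
		/* Already being handled; mgr->cbs->poll_hpd_irq() covers DOWN_REP. */
		spin_unlock(&offload_wq->offload_lock);
	}

	/* Worker side: process the sideband message, then release the claim. */
	dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
	spin_lock_irqsave(&offload_wq->offload_lock, flags);
	offload_wq->is_handling_mst_msg_rdy_event = false;
	spin_unlock_irqrestore(&offload_wq->offload_lock, flags);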
@@ -3468,7 +3410,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd;
@@ -3477,7 +3419,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
/* Also register for DP short pulse (hpd_rx). */
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
@@ -3486,11 +3428,11 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
-
- if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
- aconnector;
}
+
+ if (adev->dm.hpd_rx_offload_wq)
+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ aconnector;
}
}
@@ -3503,7 +3445,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
- unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3517,11 +3459,12 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
- * for acknowledging and handling. */
+ * for acknowledging and handling.
+ */
/* Use VBLANK interrupt */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
return r;
@@ -3529,7 +3472,7 @@ static int dce60_register_irq_handlers(struct amdgpu_device *adev)
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
- dc_interrupt_to_irq_source(dc, i+1 , 0);
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
@@ -3585,7 +3528,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
struct dc_interrupt_params int_params = {0};
int r;
int i;
- unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
if (adev->family >= AMDGPU_FAMILY_AI)
client_id = SOC15_IH_CLIENTID_DCE;
@@ -3602,7 +3545,8 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
- * for acknowledging and handling. */
+ * for acknowledging and handling.
+ */
/* Use VBLANK interrupt */
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
@@ -4049,7 +3993,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
}
static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
- unsigned *min, unsigned *max)
+ unsigned int *min, unsigned int *max)
{
if (!caps)
return 0;
@@ -4069,7 +4013,7 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
- unsigned min, max;
+ unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
@@ -4082,7 +4026,7 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c
static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
uint32_t brightness)
{
- unsigned min, max;
+ unsigned int min, max;
if (!get_brightness_range(caps, &min, &max))
return brightness;
@@ -4148,6 +4092,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
int bl_idx)
{
+ int ret;
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
@@ -4162,13 +4107,14 @@ static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
if (!rc)
return dm->brightness[bl_idx];
return convert_brightness_to_user(&caps, avg);
- } else {
- int ret = dc_link_get_backlight_level(link);
-
- if (ret == DC_ERROR_UNEXPECTED)
- return dm->brightness[bl_idx];
- return convert_brightness_to_user(&caps, ret);
}
+
+ ret = dc_link_get_backlight_level(link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return dm->brightness[bl_idx];
+
+ return convert_brightness_to_user(&caps, ret);
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -4562,7 +4508,6 @@ fail:
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_atomic_private_obj_fini(&dm->atomic_obj);
- return;
}
/******************************************************************************
@@ -5394,6 +5339,7 @@ static bool adjust_colour_depth_from_display_info(
{
enum dc_color_depth depth = timing_out->display_color_depth;
int normalized_clk;
+
do {
normalized_clk = timing_out->pix_clk_100hz / 10;
/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
@@ -5609,6 +5555,7 @@ create_fake_sink(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct dc_sink *sink = NULL;
+
sink_init_data.link = aconnector->dc_link;
sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
@@ -5732,7 +5679,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
return &aconnector->freesync_vid_base;
/* Find the preferred mode */
- list_for_each_entry (m, list_head, head) {
+ list_for_each_entry(m, list_head, head) {
if (m->type & DRM_MODE_TYPE_PREFERRED) {
m_pref = m;
break;
@@ -5756,7 +5703,7 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
* For some monitors, preferred mode is not the mode with highest
* supported refresh rate.
*/
- list_for_each_entry (m, list_head, head) {
+ list_for_each_entry(m, list_head, head) {
current_refresh = drm_mode_vrefresh(m);
if (m->hdisplay == m_pref->hdisplay &&
@@ -5849,6 +5796,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
edp_min_bpp_x16, edp_max_bpp_x16,
dsc_caps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&bw_range)) {
if (bw_range.max_kbps < link_bw_in_kbps) {
@@ -5857,6 +5805,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
&dsc_options,
0,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&dsc_cfg)) {
stream->timing.dsc_cfg = dsc_cfg;
stream->timing.flags.DSC = 1;
@@ -5871,6 +5820,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
&dsc_options,
link_bw_in_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&dsc_cfg)) {
stream->timing.dsc_cfg = dsc_cfg;
stream->timing.flags.DSC = 1;
@@ -5914,12 +5864,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
&dsc_options,
link_bandwidth_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
- timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link));
max_supported_bw_in_kbps = link_bandwidth_kbps;
dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
@@ -5931,6 +5883,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
&dsc_options,
dsc_max_supported_bw_in_kbps,
&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
@@ -6028,7 +5981,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
* This may not be an error, the use case is when we have no
* usermode calls to reset and set mode upon hotplug. In this
* case, we call set mode ourselves to restore the previous mode
- * and the modelist may not be filled in in time.
+ * and the modelist may not be filled in time.
*/
DRM_DEBUG_DRIVER("No preferred mode found\n");
} else {
@@ -6051,9 +6004,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
drm_mode_set_crtcinfo(&mode, 0);
/*
- * If scaling is enabled and refresh rate didn't change
- * we copy the vic and polarities of the old timings
- */
+ * If scaling is enabled and refresh rate didn't change
+ * we copy the vic and polarities of the old timings
+ */
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(
stream, &mode, &aconnector->base, con_state, NULL,
@@ -6817,6 +6770,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
+
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
color_depth = convert_color_depth_from_display_info(connector,
@@ -7135,7 +7089,7 @@ static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
{
struct drm_display_mode *m;
- list_for_each_entry (m, &aconnector->base.probed_modes, head) {
+ list_for_each_entry(m, &aconnector->base.probed_modes, head) {
if (drm_mode_equal(m, mode))
return true;
}
@@ -7295,6 +7249,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
/*
* configure support HPD hot plug connector_>polled default value is 0
@@ -7454,7 +7409,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
link->priv = aconnector;
- DRM_DEBUG_DRIVER("%s()\n", __func__);
i2c = create_i2c(link->ddc, link->link_index, &res);
if (!i2c) {
@@ -7982,7 +7936,6 @@ static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
- struct dc_state *dc_state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
@@ -8125,7 +8078,15 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* Only allow immediate flips for fast updates that don't
* change memory domain, FB pitch, DCC state, rotation or
* mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
*/
+ if (crtc->state->async_flip &&
+ acrtc_state->update_type != UPDATE_TYPE_FAST)
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
bundle->flip_addrs[planes_count].flip_immediate =
crtc->state->async_flip &&
acrtc_state->update_type == UPDATE_TYPE_FAST &&
@@ -8168,8 +8129,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* DRI3/Present extension with defined target_msc.
*/
last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
- }
- else {
+ } else {
/* For variable refresh rate mode only:
* Get vblank of last completed flip to avoid > 1 vrr
* flips per video frame by use of throttling, but allow
@@ -8455,55 +8415,20 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
-/**
- * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
- * @state: The atomic state to commit
- *
- * This will tell DC to commit the constructed DC state from atomic_check,
- * programming the hardware. Any failures here implies a hardware failure, since
- * atomic check should have filtered anything non-kosher.
- */
-static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_display_manager *dm = &adev->dm;
- struct dm_atomic_state *dm_state;
- struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
- unsigned long flags;
- bool wait_for_vblank = true;
- struct drm_connector *connector;
- struct drm_connector_state *old_con_state, *new_con_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
- int crtc_disable_count = 0;
bool mode_set_reset_required = false;
- int r;
-
- trace_amdgpu_dm_atomic_commit_tail_begin(state);
-
- r = drm_atomic_helper_wait_for_fences(dev, state, false);
- if (unlikely(r))
- DRM_ERROR("Waiting for fences timed out!");
-
- drm_atomic_helper_update_legacy_modeset_state(dev, state);
- drm_dp_mst_atomic_wait_for_dependencies(state);
-
- dm_state = dm_atomic_get_new_state(state);
- if (dm_state && dm_state->context) {
- dc_state = dm_state->context;
- } else {
- /* No state changes, retain current state. */
- dc_state_temp = dc_create_state(dm->dc);
- ASSERT(dc_state_temp);
- dc_state = dc_state_temp;
- dc_resource_state_copy_construct_current(dm->dc, dc_state);
- }
+ u32 i;
- for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
- new_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -8526,9 +8451,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
drm_dbg_state(state->dev,
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@@ -8601,24 +8524,22 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
} /* for_each_crtc_in_state() */
- if (dc_state) {
- /* if there mode set or reset, disable eDP PSR */
- if (mode_set_reset_required) {
- if (dm->vblank_control_workqueue)
- flush_workqueue(dm->vblank_control_workqueue);
+ /* if there mode set or reset, disable eDP PSR */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
- amdgpu_dm_psr_disable_all(dm);
- }
+ amdgpu_dm_psr_disable_all(dm);
+ }
- dm_enable_per_frame_crtc_master_sync(dc_state);
- mutex_lock(&dm->dc_lock);
- WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
- /* Allow idle optimization when vblank count is 0 for display off */
- if (dm->active_vblank_irq_count == 0)
- dc_allow_idle_optimizations(dm->dc, true);
- mutex_unlock(&dm->dc_lock);
- }
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
@@ -8638,6 +8559,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->otg_inst = status->primary_otg_inst;
}
}
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failures here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
@@ -8760,13 +8719,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
- struct dc_surface_update dummy_updates[MAX_SURFACES];
+ struct dc_surface_update *dummy_updates;
struct dc_stream_update stream_update;
struct dc_info_packet hdr_packet;
struct dc_stream_status *status = NULL;
bool abm_changed, hdr_changed, scaling_changed;
- memset(&dummy_updates, 0, sizeof(dummy_updates));
memset(&stream_update, 0, sizeof(stream_update));
if (acrtc) {
@@ -8825,6 +8783,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* Here we create an empty update on each plane.
* To fix this, DC should permit updating only stream properties.
*/
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
for (j = 0; j < status->plane_count; j++)
dummy_updates[j].surface = status->plane_states[0];
@@ -8836,6 +8795,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state->stream,
&stream_update);
mutex_unlock(&dm->dc_lock);
+ kfree(dummy_updates);
}
/**
@@ -8914,8 +8874,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
- amdgpu_dm_commit_planes(state, dc_state, dev,
- dm, crtc, wait_for_vblank);
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
}
/* Update audio instances for each connector. */
@@ -8970,9 +8929,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
for (i = 0; i < crtc_disable_count; i++)
pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
-
- if (dc_state_temp)
- dc_release_state(dc_state_temp);
}
static int dm_force_atomic_commit(struct drm_connector *connector)
@@ -9104,8 +9060,8 @@ static int do_aquire_global_lock(struct drm_device *dev,
&commit->flip_done, 10*HZ);
if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
- "timed out\n", crtc->base.id, crtc->name);
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
drm_crtc_commit_put(commit);
}
@@ -9190,7 +9146,8 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
return false;
}
-static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
@@ -9312,9 +9269,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
goto skip_modeset;
drm_dbg_state(state->dev,
- "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
- "planes_changed:%d, mode_changed:%d,active_changed:%d,"
- "connectors_changed:%d\n",
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
acrtc->crtc_id,
new_crtc_state->enable,
new_crtc_state->active,
@@ -9343,8 +9298,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
old_crtc_state)) {
new_crtc_state->mode_changed = false;
DRM_DEBUG_DRIVER(
- "Mode change not required for front porch change, "
- "setting mode_changed to %d",
+ "Mode change not required for front porch change, setting mode_changed to %d",
new_crtc_state->mode_changed);
set_freesync_fixed_config(dm_new_crtc_state);
@@ -9356,9 +9310,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct drm_display_mode *high_mode;
high_mode = get_highest_refresh_rate_mode(aconnector, false);
- if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
set_freesync_fixed_config(dm_new_crtc_state);
- }
}
ret = dm_atomic_get_state(state, &dm_state);
@@ -9526,6 +9479,7 @@ static bool should_reset_plane(struct drm_atomic_state *state,
*/
for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
struct amdgpu_framebuffer *old_afb, *new_afb;
+
if (other->type == DRM_PLANE_TYPE_CURSOR)
continue;
@@ -9624,11 +9578,12 @@ static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
}
/* Core DRM takes care of checking FB modifiers, so we only need to
- * check tiling flags when the FB doesn't have a modifier. */
+ * check tiling flags when the FB doesn't have a modifier.
+ */
if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
if (adev->family < AMDGPU_FAMILY_AI) {
linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
- AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
} else {
linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
@@ -9762,8 +9717,8 @@ static int dm_update_plane_state(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
return -EINVAL;
- else
- *is_top_most_overlay = false;
+
+ *is_top_most_overlay = false;
}
DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
@@ -9850,12 +9805,12 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
* positioning from the underlying pipe. Check the cursor plane's
- * blending properties match the underlying planes'. */
+ * blending properties match the underlying planes'.
+ */
new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
- if (!new_cursor_state || !new_cursor_state->fb) {
+ if (!new_cursor_state || !new_cursor_state->fb)
return 0;
- }
dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
@@ -9900,6 +9855,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
struct drm_connector_state *conn_state, *old_conn_state;
struct amdgpu_dm_connector *aconnector = NULL;
int i;
+
for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
if (!conn_state->crtc)
conn_state = old_conn_state;
@@ -10334,7 +10290,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
/* Store the overall update type for use later in atomic check. */
- for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
struct dm_crtc_state *dm_new_crtc_state =
to_dm_crtc_state(new_crtc_state);
@@ -10356,7 +10312,7 @@ fail:
else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
else
- DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
trace_amdgpu_dm_atomic_check_finish(state, ret);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 4561f55afa99..9fb5bb3a75a7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -195,6 +195,11 @@ struct hpd_rx_irq_offload_work_queue {
*/
bool is_handling_link_loss;
/**
+ * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+ * ready event when we're already handling mst message ready event
+ */
+ bool is_handling_mst_msg_rdy_event;
+ /**
* @aconnector: The aconnector that this work queue is attached to
*/
struct amdgpu_dm_connector *aconnector;
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
struct drm_dp_mst_port *mst_output_port;
struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
+ struct mutex handle_mst_msg_ready;
+
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 0802f8e8fac5..52ecfa746b54 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -123,9 +123,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
crtc = secure_display_ctx->crtc;
- if (!crtc) {
+ if (!crtc)
return;
- }
psp = &drm_to_adev(crtc->dev)->psp;
@@ -151,9 +150,8 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
if (!ret) {
- if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
- }
}
mutex_unlock(&psp->securedisplay_context.mutex);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 440fc0869a34..30d4c6fd95f5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ /*
+ * Only allow async flips for fast updates that don't change the FB
+ * pitch, the DCC state, rotation, etc.
+ */
+ if (crtc_state->async_flip &&
+ dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ return -EINVAL;
+ }
+
/* In some use cases, like reset, no stream is attached */
if (!dm_crtc_state->stream)
return 0;
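(Editorial note: the path this new check rejects is a legacy page flip submitted from userspace with the async flag set; a minimal libdrm sketch, where fd, crtc_id, fb_id and user_data are assumed to be set up elsewhere:)

	#include <stdio.h>
	#include <xf86drmMode.h>

	/* An async flip that is not a fast update now fails atomic check with
	 * -EINVAL instead of being silently demoted to a vblank-synced flip. */
	int ret = drmModePageFlip(fd, crtc_id, fb_id,
				  DRM_MODE_PAGE_FLIP_ASYNC | DRM_MODE_PAGE_FLIP_EVENT,
				  user_data);
	if (ret)
		fprintf(stderr, "async flip rejected: %d\n", ret);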
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index d63ee636483b..7c21e21bcc51 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1075,24 +1075,24 @@ static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
switch (dm_crtc_state->stream->output_color_space) {
case COLOR_SPACE_SRGB:
- seq_printf(m, "sRGB");
+ seq_puts(m, "sRGB");
break;
case COLOR_SPACE_YCBCR601:
case COLOR_SPACE_YCBCR601_LIMITED:
- seq_printf(m, "BT601_YCC");
+ seq_puts(m, "BT601_YCC");
break;
case COLOR_SPACE_YCBCR709:
case COLOR_SPACE_YCBCR709_LIMITED:
- seq_printf(m, "BT709_YCC");
+ seq_puts(m, "BT709_YCC");
break;
case COLOR_SPACE_ADOBERGB:
- seq_printf(m, "opRGB");
+ seq_puts(m, "opRGB");
break;
case COLOR_SPACE_2020_RGB_FULLRANGE:
- seq_printf(m, "BT2020_RGB");
+ seq_puts(m, "BT2020_RGB");
break;
case COLOR_SPACE_2020_YCBCR:
- seq_printf(m, "BT2020_YCC");
+ seq_puts(m, "BT2020_YCC");
break;
default:
goto unlock;
@@ -3022,7 +3022,7 @@ static int edp_ilr_show(struct seq_file *m, void *unused)
seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz);
}
} else {
- seq_printf(m, "ILR is not supported by this eDP panel.\n");
+ seq_puts(m, "ILR is not supported by this eDP panel.\n");
}
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 5536d17306d0..8db47f66eac0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -39,10 +39,10 @@
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
{
-
struct dc_link *link = handle;
struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} };
- struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+ struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW,
+ link->dc->caps.i2c_speed_in_khz};
return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}
@@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3
{
struct dc_link *link = handle;
- struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} };
- struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz};
+ struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset},
+ {false, address, size, data} };
+ struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW,
+ link->dc->caps.i2c_speed_in_khz};
return dm_helpers_submit_i2c(link->ctx, link, &cmd);
}
@@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
{
-
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.context.initialized) {
@@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint
*srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
*srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
-
return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
}
-static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+static int psp_set_srm(struct psp_context *psp,
+ u8 *srm, uint32_t srm_size, uint32_t *srm_version)
{
-
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.context.initialized) {
@@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size,
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
return -EINVAL;
@@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
static void link_lock(struct hdcp_workqueue *work, bool lock)
{
-
int i = 0;
for (i = 0; i < work->max_link; i++) {
@@ -160,10 +160,11 @@ static void link_lock(struct hdcp_workqueue *work, bool lock)
mutex_unlock(&work[i].mutex);
}
}
+
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index,
struct amdgpu_dm_connector *aconnector,
- uint8_t content_type,
+ u8 content_type,
bool enable_encryption)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -178,18 +179,19 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
query.display = NULL;
mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
- if (query.display != NULL) {
+ if (query.display) {
memcpy(display, query.display, sizeof(struct mod_hdcp_display));
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
if (enable_encryption) {
- /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp
- * (s3 resume case)
+ /* Explicitly set the saved SRM as sysfs call will be after
+ * we already enabled hdcp (s3 resume case)
*/
if (hdcp_work->srm_size > 0)
- psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+ psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm,
+ hdcp_work->srm_size,
&hdcp_work->srm_version);
display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE;
@@ -219,7 +221,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
}
static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
- unsigned int link_index,
+ unsigned int link_index,
struct amdgpu_dm_connector *aconnector)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -238,7 +240,8 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n",
- aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms);
+ aconnector->base.index, conn_state->hdcp_content_type,
+ aconnector->base.dpms);
}
mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output);
@@ -246,6 +249,7 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
process_output(hdcp_w);
mutex_unlock(&hdcp_w->mutex);
}
+
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
@@ -274,15 +278,12 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index
schedule_work(&hdcp_w->cpirq_work);
}
-
-
-
static void event_callback(struct work_struct *work)
{
struct hdcp_workqueue *hdcp_work;
hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue,
- callback_dwork);
+ callback_dwork);
mutex_lock(&hdcp_work->mutex);
@@ -294,13 +295,12 @@ static void event_callback(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
-
}
static void event_property_update(struct work_struct *work)
{
- struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
+ struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue,
+ property_update_work);
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_device *dev;
long ret;
@@ -334,11 +334,10 @@ static void event_property_update(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
if (conn_state->commit) {
- ret = wait_for_completion_interruptible_timeout(
- &conn_state->commit->hw_done, 10 * HZ);
+ ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done,
+ 10 * HZ);
if (ret == 0) {
- DRM_ERROR(
- "HDCP state unknown! Setting it to DESIRED");
+ DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n");
hdcp_work->encryption_status[conn_index] =
MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
}
@@ -349,24 +348,20 @@ static void event_property_update(struct work_struct *work)
DRM_MODE_HDCP_CONTENT_TYPE0 &&
hdcp_work->encryption_status[conn_index] <=
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
-
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
- drm_hdcp_update_content_protection(
- connector,
- DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED);
} else if (conn_state->hdcp_content_type ==
DRM_MODE_HDCP_CONTENT_TYPE1 &&
hdcp_work->encryption_status[conn_index] ==
MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
- drm_hdcp_update_content_protection(
- connector,
- DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED);
}
} else {
DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
- drm_hdcp_update_content_protection(
- connector, DRM_MODE_CONTENT_PROTECTION_DESIRED);
-
+ drm_hdcp_update_content_protection(connector,
+ DRM_MODE_CONTENT_PROTECTION_DESIRED);
}
mutex_unlock(&hdcp_work->mutex);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -402,7 +397,7 @@ static void event_property_validate(struct work_struct *work)
&query);
DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
- aconnector->base.index,
+ aconnector->base.index,
aconnector->base.state->content_protection,
query.encryption_status,
hdcp_work->encryption_status[conn_index]);
@@ -410,7 +405,8 @@ static void event_property_validate(struct work_struct *work)
if (query.encryption_status !=
hdcp_work->encryption_status[conn_index]) {
DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
- hdcp_work->encryption_status[conn_index], query.encryption_status);
+ hdcp_work->encryption_status[conn_index],
+ query.encryption_status);
hdcp_work->encryption_status[conn_index] =
query.encryption_status;
@@ -429,7 +425,7 @@ static void event_watchdog_timer(struct work_struct *work)
struct hdcp_workqueue *hdcp_work;
hdcp_work = container_of(to_delayed_work(work),
- struct hdcp_workqueue,
+ struct hdcp_workqueue,
watchdog_timer_dwork);
mutex_lock(&hdcp_work->mutex);
@@ -443,7 +439,6 @@ static void event_watchdog_timer(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
}
static void event_cpirq(struct work_struct *work)
@@ -459,10 +454,8 @@ static void event_cpirq(struct work_struct *work)
process_output(hdcp_work);
mutex_unlock(&hdcp_work->mutex);
-
}
-
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
{
int i = 0;
@@ -478,10 +471,8 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work)
kfree(hdcp_work);
}
-
static bool enable_assr(void *handle, struct dc_link *link)
{
-
struct hdcp_workqueue *hdcp_work = handle;
struct mod_hdcp hdcp = hdcp_work->hdcp;
struct psp_context *psp = hdcp.config.psp.handle;
@@ -499,7 +490,8 @@ static bool enable_assr(void *handle, struct dc_link *link)
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE;
- dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst;
+ dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index =
+ link->link_enc_hw_inst;
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
@@ -541,7 +533,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
else if (aconnector->dc_em_sink)
sink = aconnector->dc_em_sink;
- if (sink != NULL)
+ if (sink)
link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
@@ -567,16 +559,20 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
conn_state = aconnector->base.state;
DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index,
- (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1,
- (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1);
+ (!!aconnector->base.state) ?
+ aconnector->base.state->content_protection : -1,
+ (!!aconnector->base.state) ?
+ aconnector->base.state->hdcp_content_type : -1);
if (conn_state)
hdcp_update_display(hdcp_work, link_index, aconnector,
- conn_state->hdcp_content_type, false);
+ conn_state->hdcp_content_type, false);
}
-
-/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel
+/**
+ * DOC: Add sysfs interface for set/get srm
+ *
+ * NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel
* will automatically call once or twice depending on the size
*
* call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
@@ -587,23 +583,23 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on
* the last call we will send the full SRM. PSP will fail on every call before the last.
*
- * This means we don't know if the SRM is good until the last call. And because of this limitation we
- * cannot throw errors early as it will stop the kernel from writing to sysfs
+ * This means we don't know if the SRM is good until the last call. And because of this
+ * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs
*
* Example 1:
- * Good SRM size = 5096
- * first call to write 4096 -> PSP fails
- * Second call to write 1000 -> PSP Pass -> SRM is set
+ * Good SRM size = 5096
+ * first call to write 4096 -> PSP fails
+ * Second call to write 1000 -> PSP Pass -> SRM is set
*
* Example 2:
- * Bad SRM size = 4096
- * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
- * is the last call)
+ * Bad SRM size = 4096
+ * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
+ * is the last call)
*
* Solution?:
- * 1: Parse the SRM? -> It is signed so we don't know the EOF
- * 2: We can have another sysfs that passes the size before calling set. -> simpler solution
- * below
+ * 1: Parse the SRM? -> It is signed so we don't know the EOF
+ * 2: We can have another sysfs that passes the size before calling set. -> simpler solution
+ * below
*
* Easy Solution:
* Always call get after Set to verify if set was successful.
@@ -612,20 +608,21 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
* +----------------------+
* PSP will only update its srm if its older than the one we are trying to load.
* Always do set first than get.
- * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer
- * version and save it
+ * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer
+ * version and save it
*
- * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
- * same(newer) version back and save it
+ * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
+ * same(newer) version back and save it
*
- * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
- * incorrect/corrupted and we should correct our SRM by getting it from PSP
+ * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
+ * incorrect/corrupted and we should correct our SRM by getting it from PSP
*/
-static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
- uint32_t srm_version = 0;
+ u32 srm_version = 0;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
link_lock(work, true);
@@ -639,19 +636,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi
work->srm_version = srm_version;
}
-
link_lock(work, false);
return count;
}
-static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buffer,
loff_t pos, size_t count)
{
struct hdcp_workqueue *work;
- uint8_t *srm = NULL;
- uint32_t srm_version;
- uint32_t srm_size;
+ u8 *srm = NULL;
+ u32 srm_version;
+ u32 srm_size;
size_t ret = count;
work = container_of(bin_attr, struct hdcp_workqueue, attr);
@@ -684,12 +681,12 @@ ret:
/* From the hdcp spec (5. Renewability) the SRM needs to be stored in non-volatile memory.
*
* For example,
- * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
- * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
- * across boot/reboots/suspend/resume/shutdown
+ * if Application "A" sets the SRM (ver 2), we reboot/suspend, and Application "B" later
+ * needs to use HDCP, the version in PSP should still be SRM (ver 2). So the SRM should be
+ * persistent across boot/reboot/suspend/resume/shutdown
*
- * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need
- * to make the SRM persistent.
+ * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP
+ * we need to make the SRM persistent.
*
* -PSP owns the checking of the SRM but doesn't have the ability to store it in non-volatile memory.
* -The kernel cannot write to the filesystem.
@@ -699,8 +696,8 @@ ret:
*
* Usermode can read/write to/from PSP using the sysfs interface
* For example:
- * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile
- * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ * to save the SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * to load it from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
*/
static const struct bin_attribute data_attr = {
.attr = {.name = "hdcp_srm", .mode = 0664},
@@ -709,10 +706,9 @@ static const struct bin_attribute data_attr = {
.read = srm_data_read,
};
-
-struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
+ struct cp_psp *cp_psp, struct dc *dc)
{
-
int max_caps = dc->caps.max_links;
struct hdcp_workqueue *hdcp_work;
int i = 0;
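For context on data_attr above: it is a standard sysfs binary attribute, so exposing it is a single call. A hypothetical registration sketch (the actual hookup lives outside this hunk, and using the drm device's kobject here is an assumption):

        /* Hypothetical sketch, not taken from this patch: */
        int ret = sysfs_create_bin_file(&adev->dev->kobj, &data_attr);

        if (ret)
                DRM_ERROR("Failed to create hdcp_srm sysfs file\n");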
@@ -721,14 +717,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
if (ZERO_OR_NULL_PTR(hdcp_work))
return NULL;
- hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+ hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+ sizeof(*hdcp_work->srm), GFP_KERNEL);
- if (hdcp_work->srm == NULL)
+ if (!hdcp_work->srm)
goto fail_alloc_context;
- hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+ hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE,
+ sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
- if (hdcp_work->srm_temp == NULL)
+ if (!hdcp_work->srm_temp)
goto fail_alloc_context;
hdcp_work->max_link = max_caps;
@@ -781,10 +779,5 @@ fail_alloc_context:
kfree(hdcp_work);
return NULL;
-
-
-
}
-
-
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index d9a482908380..e94eeeb97688 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -68,15 +68,15 @@ static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
}
}
-/* dm_helpers_parse_edid_caps
- *
- * Parse edid caps
+/**
+ * dm_helpers_parse_edid_caps() - Parse edid caps
*
+ * @link: current detected link
* @edid: [in] pointer to edid
- * edid_caps: [in] pointer to edid caps
- * @return
- * void
- * */
+ * @edid_caps: [out] pointer to edid caps to be filled in
+ *
+ * Return: enum dc_edid_status, the result of parsing the EDID
+ */
enum dc_edid_status dm_helpers_parse_edid_caps(
struct dc_link *link,
const struct dc_edid *edid,
@@ -255,7 +255,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
/* Accessing the connector state is required for vcpi_slots allocation
* and directly relies on behaviour in commit check
* that blocks before commit guaranteeing that the state
- * is not gonna be swapped while still in use in commit tail */
+ * is not going to be swapped while still in use in commit tail
+ */
if (!aconnector || !aconnector->mst_root)
return false;
@@ -282,7 +283,8 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
/* mst_mgr->payloads are the VC payloads; notify the MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
* stream. AMD ASIC stream slot allocation should follow the same
- * sequence. copy DRM MST allocation to dc */
+ * sequence. Copy the DRM MST allocation to dc
+ */
fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
return true;
@@ -426,7 +428,7 @@ void dm_dtn_log_append_v(struct dc_context *ctx,
total = log_ctx->pos + n + 1;
if (total > log_ctx->size) {
- char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
+ char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);
if (buf) {
memcpy(buf, log_ctx->buf, log_ctx->pos);
@@ -633,7 +635,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd));
if (ret < 0) {
- DRM_ERROR(" execute_synaptics_rc_command - write cmd ..., err = %d\n", ret);
+ DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret);
return false;
}
@@ -655,7 +657,7 @@ static bool execute_synaptics_rc_command(struct drm_dp_aux *aux,
drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length);
}
- DC_LOG_DC(" execute_synaptics_rc_command - success = %d\n", success);
+ DC_LOG_DC("%s: success = %d\n", __func__, success);
return success;
}
@@ -664,7 +666,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
{
unsigned char data[16] = {0};
- DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n");
+ DC_LOG_DC("Start %s\n", __func__);
// Step 2
data[0] = 'P';
@@ -722,7 +724,7 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux)
if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL))
return;
- DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n");
+ DC_LOG_DC("Done %s\n", __func__);
}
/* MST Dock */
@@ -995,9 +997,8 @@ void dm_helpers_override_panel_settings(
struct dc_panel_config *panel_config)
{
// Feature DSC
- if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) {
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
panel_config->dsc.disable_dsc_edp = true;
- }
}
void *dm_helpers_allocate_gpu_mem(
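A usage note for the DC_DISABLE_DSC hunk above: amdgpu_dc_debug_mask is populated from the amdgpu.dcdebugmask module parameter, so booting with, for example, amdgpu.dcdebugmask=0x4 (assuming DC_DISABLE_DSC still carries that bit value) makes dm_helpers_override_panel_settings() force disable_dsc_edp on every panel.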
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 19f543ba7205..51467f132c26 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -120,7 +120,8 @@ static void dm_irq_work_func(struct work_struct *work)
/* Call a DAL subcomponent which registered for interrupt notification
* at INTERRUPT_LOW_IRQ_CONTEXT.
- * (The most common use is HPD interrupt) */
+ * (The most common use is HPD interrupt)
+ */
}
/*
@@ -172,7 +173,8 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
if (handler_removed == false) {
/* Not necessarily an error - caller may not
- * know the context. */
+ * know the context.
+ */
return NULL;
}
@@ -261,7 +263,7 @@ validate_irq_registration_params(struct dc_interrupt_params *int_params,
static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
irq_handler_idx handler_idx)
{
- if (DAL_INVALID_IRQ_HANDLER_IDX == handler_idx) {
+ if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
return false;
}
@@ -343,7 +345,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
/* This pointer will be stored by code which requested interrupt
* registration.
* The same pointer will be needed in order to unregister the
- * interrupt. */
+ * interrupt.
+ */
DRM_DEBUG_KMS(
"DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n",
@@ -390,7 +393,8 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
if (handler_list == NULL) {
/* If we got here, it means we searched all irq contexts
- * for this irq source, but the handler was not found. */
+ * for this irq source, but the handler was not found.
+ */
DRM_ERROR(
"DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n",
ih, irq_source);
@@ -450,7 +454,8 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
/* The handler was removed from the table,
* it means it is safe to flush all the 'work'
- * (because no code can schedule a new one). */
+ * (because no code can schedule a new one).
+ */
lh = &adev->dm.irq_handler_list_low_tab[src];
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
@@ -494,7 +499,7 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
if (!list_empty(hnd_list_l)) {
- list_for_each_safe (entry, tmp, hnd_list_l) {
+ list_for_each_safe(entry, tmp, hnd_list_l) {
handler = list_entry(
entry,
struct amdgpu_dm_irq_handler_data,
@@ -571,7 +576,7 @@ static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
if (list_empty(handler_list))
return;
- list_for_each_entry (handler_data, handler_list, list) {
+ list_for_each_entry(handler_data, handler_list, list) {
if (queue_work(system_highpri_wq, &handler_data->work)) {
work_queued = true;
break;
@@ -627,7 +632,8 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev,
&adev->dm.irq_handler_list_high_tab[irq_source],
list) {
/* Call a subcomponent which registered for immediate
- * interrupt notification */
+ * interrupt notification
+ */
handler_data->handler(handler_data->handler_arg);
}
@@ -664,7 +670,7 @@ static int amdgpu_dm_irq_handler(struct amdgpu_device *adev,
return 0;
}
-static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
+static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type)
{
switch (type) {
case AMDGPU_HPD_1:
@@ -686,7 +692,7 @@ static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned type)
static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned type,
+ unsigned int type,
enum amdgpu_interrupt_state state)
{
enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type);
@@ -698,7 +704,7 @@ static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev,
static inline int dm_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state,
const enum irq_type dal_irq_type,
const char *func)
@@ -729,7 +735,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
@@ -743,7 +749,7 @@ static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev,
static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
- unsigned crtc_id,
+ unsigned int crtc_id,
enum amdgpu_interrupt_state state)
{
return dm_irq_state(
@@ -893,13 +899,13 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd,
true);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd_rx,
true);
@@ -928,13 +934,13 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
to_amdgpu_dm_connector(connector);
const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd,
false);
}
- if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
dc_interrupt_set(adev->dm.dc,
dc_link->irq_source_hpd_rx,
false);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 46d0a8f57e55..943959012d04 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -296,6 +296,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector->edid) {
struct edid *edid;
+
edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
if (!edid) {
@@ -619,8 +620,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return connector;
}
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type)
+{
+ u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ u8 dret;
+ bool new_irq_handled = false;
+ int dpcd_addr;
+ u8 dpcd_bytes_to_read;
+ const u8 max_process_count = 30;
+ u8 process_count = 0;
+ u8 retry;
+ struct amdgpu_dm_connector *aconnector =
+ container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+ const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+ if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+ dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+ /* DPCD 0x200 - 0x201 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT;
+ } else {
+ dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+ /* DPCD 0x2002 - 0x2005 for downstream IRQ */
+ dpcd_addr = DP_SINK_COUNT_ESI;
+ }
+
+ mutex_lock(&aconnector->handle_mst_msg_ready);
+
+ while (process_count < max_process_count) {
+ u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+ process_count++;
+
+ dret = drm_dp_dpcd_read(
+ &aconnector->dm_dp_aux.aux,
+ dpcd_addr,
+ esi,
+ dpcd_bytes_to_read);
+
+ if (dret != dpcd_bytes_to_read) {
+ DRM_DEBUG_KMS("DPCD read returned an unexpected number of bytes!");
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+ switch (msg_rdy_type) {
+ case DOWN_REP_MSG_RDY_EVENT:
+ /* Only handle the DOWN_REP_MSG_RDY case */
+ esi[1] &= DP_DOWN_REP_MSG_RDY;
+ break;
+ case UP_REQ_MSG_RDY_EVENT:
+ /* Only handle the UP_REQ_MSG_RDY case */
+ esi[1] &= DP_UP_REQ_MSG_RDY;
+ break;
+ default:
+ /* Handle both cases */
+ esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+ break;
+ }
+
+ if (!esi[1])
+ break;
+
+ /* handle MST irq */
+ if (aconnector->mst_mgr.mst_state)
+ drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+ esi,
+ ack,
+ &new_irq_handled);
+
+ if (new_irq_handled) {
+ /* ACK at DPCD to notify down stream */
+ for (retry = 0; retry < 3; retry++) {
+ ssize_t wret;
+
+ wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+ dpcd_addr + 1,
+ ack[1]);
+ if (wret == 1)
+ break;
+ }
+
+ if (retry == 3) {
+ DRM_ERROR("Failed to ack MST event.\n");
+ break;
+ }
+
+ drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+ new_irq_handled = false;
+ } else {
+ break;
+ }
+ }
+
+ mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+ if (process_count == max_process_count)
+ DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+ dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
+ .poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
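Besides the .poll_hpd_irq hook above, dm_handle_mst_sideband_msg_ready_event() is exported so the HPD RX interrupt path can drive the UP_REQ direction as well. A hypothetical call-site sketch using only the signature and enum this patch introduces (the surrounding IRQ plumbing is assumed):

        /* Hypothetical sketch: route an UP_REQ sideband interrupt to the handler. */
        static void example_route_up_req(struct amdgpu_dm_connector *aconnector)
        {
                dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr,
                                                       UP_REQ_MSG_RDY_EVENT);
        }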
@@ -717,6 +828,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
&dsc_options,
0,
params[i].timing,
+ dc_link_get_highest_encoding_format(params[i].aconnector->dc_link),
&params[i].timing->dsc_cfg)) {
params[i].timing->flags.DSC = 1;
@@ -767,7 +879,9 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
&dsc_options,
- (int) kbps, param.timing, &dsc_config);
+ (int) kbps, param.timing,
+ dc_link_get_highest_encoding_format(param.aconnector->dc_link),
+ &dsc_config);
return dsc_config.bits_per_pixel;
}
@@ -1005,8 +1119,11 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
dsc_policy.min_target_bpp * 16,
dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, &params[count].bw_range))
- params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ &stream->timing,
+ dc_link_get_highest_encoding_format(dc_link),
+ &params[count].bw_range))
+ params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(dc_link));
count++;
}
@@ -1466,7 +1583,7 @@ static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
dsc_policy.min_target_bpp * 16,
dsc_policy.max_target_bpp * 16,
&stream->sink->dsc_caps.dsc_dec_caps,
- &stream->timing, bw_range);
+ &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range);
return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 1e4ede1e57ab..37c820ab0fdb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -49,6 +49,13 @@
#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
+enum mst_msg_ready_type {
+ NONE_MSG_RDY_EVENT = 0,
+ DOWN_REP_MSG_RDY_EVENT = 1,
+ UP_REQ_MSG_RDY_EVENT = 2,
+ DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
+void dm_handle_mst_sideband_msg_ready_event(
+ struct drm_dp_mst_topology_mgr *mgr,
+ enum mst_msg_ready_type msg_rdy_type);
+
struct dsc_mst_fairness_vars {
int pbn;
bool dsc_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 322668973747..2198df96ed6f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -113,6 +113,11 @@ void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state
DRM_FORMAT_ARGB8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_ARGB16161616,
+ DRM_FORMAT_ABGR16161616,
+ DRM_FORMAT_ARGB16161616F,
};
uint32_t format = plane_state->fb->format->format;
unsigned int i;
@@ -164,7 +169,7 @@ static bool modifier_has_dcc(uint64_t modifier)
return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
}
-static unsigned modifier_gfx9_swizzle_mode(uint64_t modifier)
+static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier)
{
if (modifier == DRM_FORMAT_MOD_LINEAR)
return 0;
@@ -581,7 +586,7 @@ static void add_gfx11_modifiers(struct amdgpu_device *adev,
int pkrs = 0;
u32 gb_addr_config;
u8 i = 0;
- unsigned swizzle_r_x;
+ unsigned int swizzle_r_x;
uint64_t modifier_r_x;
uint64_t modifier_dcc_best;
uint64_t modifier_dcc_4k;
@@ -698,8 +703,8 @@ static int get_plane_formats(const struct drm_plane *plane,
* caps list.
*/
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY ||
+ (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) {
for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
if (num_formats >= max_formats)
break;
@@ -717,25 +722,29 @@ static int get_plane_formats(const struct drm_plane *plane,
formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
}
- break;
+ } else {
+ switch (plane->type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
- case DRM_PLANE_TYPE_OVERLAY:
- for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
- if (num_formats >= max_formats)
- break;
+ formats[num_formats++] = overlay_formats[i];
+ }
+ break;
- formats[num_formats++] = overlay_formats[i];
- }
- break;
+ case DRM_PLANE_TYPE_CURSOR:
+ for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
- case DRM_PLANE_TYPE_CURSOR:
- for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
- if (num_formats >= max_formats)
- break;
+ formats[num_formats++] = cursor_formats[i];
+ }
+ break;
- formats[num_formats++] = cursor_formats[i];
+ default:
+ break;
}
- break;
}
return num_formats;
@@ -1459,6 +1468,15 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_blend_mode_property(plane, blend_caps);
}
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ drm_plane_create_zpos_immutable_property(plane, 0);
+ } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ unsigned int zpos = 1 + drm_plane_index(plane);
+ drm_plane_create_zpos_property(plane, zpos, 1, 254);
+ } else if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ drm_plane_create_zpos_immutable_property(plane, 255);
+ }
+
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||
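The zpos hunk above gives primary planes an immutable z of 0, cursors an immutable 255, and overlays a mutable 1-254 range defaulting to 1 + the plane index. A hypothetical compositor-side sketch in C using libdrm; fd, plane_id, and zpos_prop_id (as looked up via drmModeObjectGetProperties()) are assumed:

#include <xf86drm.h>
#include <xf86drmMode.h>

/* Sketch only: ask to move an overlay plane to z = 2, test-only commit. */
static int example_set_zpos(int fd, uint32_t plane_id, uint32_t zpos_prop_id)
{
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        int ret;

        if (!req)
                return -1;
        drmModeAtomicAddProperty(req, plane_id, zpos_prop_id, 2);
        ret = drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_TEST_ONLY, NULL);
        drmModeAtomicFree(req);
        return ret;
}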
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 75284e2cec74..848c5b4bb301 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -334,7 +334,8 @@ bool dm_pp_get_clock_levels_by_type(
if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
/* This clock is higher than the validation clock.
* That means the previous one is the highest
- * non-boosted one. */
+ * non-boosted one.
+ */
DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
dc_clks->num_levels, i);
dc_clks->num_levels = i > 0 ? i : 1;
@@ -406,10 +407,10 @@ bool dm_pp_notify_wm_clock_changes(
* TODO: expand this to other ASICs
*/
if ((adev->asic_type >= CHIP_POLARIS10) &&
- (adev->asic_type <= CHIP_VEGAM) &&
- !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
- (void *)wm_with_clock_ranges))
- return true;
+ (adev->asic_type <= CHIP_VEGAM) &&
+ !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev,
+ (void *)wm_with_clock_ranges))
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 4f61d4f257cd..08ce3bb8f640 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -166,6 +166,7 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*/
if (vsync_rate_hz != 0) {
unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
+
num_frames_static = (30000 / frame_time_microsec) + 1;
}