author    | Dave Airlie <airlied@redhat.com> | 2023-12-05 12:11:41 +1000
committer | Dave Airlie <airlied@redhat.com> | 2023-12-05 12:11:41 +1000
commit    | 5edfd7d94b0310b74136b666551f1d23711ed445 (patch)
tree      | 5c9f48d16e659e1166a04e3374ab953f62ad7190 /drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
parent    | a13fee31f56449fc600d9e064c7b32302f92dcef (diff)
parent    | b719a9c15d52d4f56bdea8241a5d90fd9197ce99 (diff)
Merge tag 'amd-drm-next-6.8-2023-12-01' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.8-2023-12-01:
amdgpu:
- Add new 64-bit sequence number infrastructure.
  This will ultimately be used for user queue synchronization.
- GPUVM updates
- Misc code cleanups
- RAS updates
- DCN 3.5 updates
- Rework PCIe link speed handling
- Document GPU reset types
- DMUB fixes
- eDP fixes
- NBIO 7.9 updates
- NBIO 7.11 updates
- SubVP updates
- DCN 3.1.4 fixes
- ABM fixes
- AGP aperture fix
- DCN 3.1.5 fix
- Fix some potential error path memory leaks
- Enable PCIe PMEs
- Add XGMI, PCIe state dumping for aqua vanjaram
- GFX11 golden register updates
- Misc display fixes
amdkfd:
- Migrate TLB flushing logic to amdgpu
- Trap handler fixes
- Fix restore workers handling on suspend and reset
- Fix possible memory leak in pqm_uninit()
radeon:
- Fix some possible overflows in command buffer checking
- Check for errors in ring_lock
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231201181743.5313-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c')
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 53
1 file changed, 33 insertions, 20 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index eb6121ad92fd..b4696ec621c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -334,15 +334,14 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
 	return ACT_SUCCESS;
 }
 
-bool dm_helpers_dp_mst_send_payload_allocation(
+void dm_helpers_dp_mst_send_payload_allocation(
 		struct dc_context *ctx,
-		const struct dc_stream_state *stream,
-		bool enable)
+		const struct dc_stream_state *stream)
 {
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_dp_mst_topology_state *mst_state;
 	struct drm_dp_mst_topology_mgr *mst_mgr;
-	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
+	struct drm_dp_mst_atomic_payload *new_payload;
 	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
 	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
 	int ret = 0;
@@ -350,25 +349,13 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
 
 	if (!aconnector || !aconnector->mst_root)
-		return false;
+		return;
 
 	mst_mgr = &aconnector->mst_root->mst_mgr;
 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
-
 	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
 
-	if (!enable) {
-		set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
-		clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
-	}
-
-	if (enable) {
-		ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
-	} else {
-		dm_helpers_construct_old_payload(mst_mgr, mst_state,
-						 new_payload, &old_payload);
-		drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
-	}
+	ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
 
 	if (ret) {
 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
@@ -379,10 +366,36 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
 			clr_flag, false);
 	}
-
-	return true;
 }
 
+void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
+		struct dc_context *ctx,
+		const struct dc_stream_state *stream)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_dp_mst_topology_state *mst_state;
+	struct drm_dp_mst_topology_mgr *mst_mgr;
+	struct drm_dp_mst_atomic_payload *new_payload, old_payload;
+	enum mst_progress_status set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
+	enum mst_progress_status clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
+
+	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+
+	if (!aconnector || !aconnector->mst_root)
+		return;
+
+	mst_mgr = &aconnector->mst_root->mst_mgr;
+	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
+	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+	dm_helpers_construct_old_payload(mst_mgr, mst_state,
+					 new_payload, &old_payload);
+
+	drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
+
+	amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, true);
+	amdgpu_dm_set_mst_status(&aconnector->mst_status, clr_flag, false);
+}
+
 void dm_dtn_log_begin(struct dc_context *ctx,
 	struct dc_log_buffer_ctx *log_ctx)
 {
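The diff above splits the old enable/disable behaviour of dm_helpers_dp_mst_send_payload_allocation() into two single-purpose helpers: the allocation step keeps the original name, while teardown moves to dm_helpers_dp_mst_update_mst_mgr_for_deallocation(). As a rough illustration of the call-site impact only (this sketch is not part of the patch, and example_update_mst_payload() is a hypothetical wrapper), a caller that previously passed an enable flag would now choose the helper explicitly:

```c
/* Hypothetical illustration of the call-site change; not from this patch. */
static void example_update_mst_payload(struct dc_context *ctx,
				       const struct dc_stream_state *stream,
				       bool enable)
{
	if (enable)
		/* Allocation path: step 2 of MST payload allocation. */
		dm_helpers_dp_mst_send_payload_allocation(ctx, stream);
	else
		/* Teardown path: remove the payload and update MST manager state. */
		dm_helpers_dp_mst_update_mst_mgr_for_deallocation(ctx, stream);
}
```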