author    | Dave Airlie <airlied@redhat.com> | 2020-01-13 16:49:01 +1000
committer | Dave Airlie <airlied@redhat.com> | 2020-01-13 16:49:02 +1000
commit    | d5d88cd6ee6b668520c461b960077b5dbd440bc2 (patch)
tree      | 5c6ed633ebfd18dcf1a65aa26e19ef5c8e6560d3 /drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
parent    | 79f88da22b4b2d8c56c784de65e24c4b80f59c0c (diff)
parent    | 0f899fd466b693a129b16994c1b2b4db2fcde4a4 (diff)
Merge tag 'amd-drm-next-5.6-2020-01-09' of git://people.freedesktop.org/~agd5f/linux into drm-next
amd-drm-next-5.6-2020-01-09:
amdgpu:
- Enable DCN support on POWER
- Enable GFXOFF for Raven1 refresh
- Clean up MM engine idle handlers
- HDMI 2.0 audio fixes
- Fixes for some 10 bpc EDP panels
- Watermark fixes for renoir
- SR-IOV fixes
- Runtime pm robustness fixes
- Arcturus VCN fixes
- RAS fixes
- BACO fixes for Arcturus
- Stable pstate fixes for swSMU
- HDCP fixes
- PSP cleanup
- HDMI fixes
- Misc cleanups
amdkfd:
- Spread interrupt work across cores to reduce latency
- Topology fixes for APUs
- GPU reset improvements
UAPI:
- Enable DRIVER_SYNCOBJ_TIMELINE for vulkan
- Return better error values for kfd process ioctl
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200109230338.8022-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 90
1 file changed, 88 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 652cecc030b3..3b025a3f8c7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -25,6 +25,7 @@
 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_pm.h"
 #include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
@@ -51,6 +52,8 @@ static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_sta
 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 				struct dpg_pause_state *new_state);
 
+static void vcn_v1_0_idle_work_handler(struct work_struct *work);
+
 /**
  * vcn_v1_0_early_init - set function pointers
  *
@@ -105,6 +108,9 @@ static int vcn_v1_0_sw_init(void *handle)
 	if (r)
 		return r;
 
+	/* Override the work func */
+	adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
+
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		const struct common_firmware_header *hdr;
 		hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
@@ -1758,6 +1764,86 @@ static int vcn_v1_0_set_powergating_state(void *handle,
 	return ret;
 }
 
+static void vcn_v1_0_idle_work_handler(struct work_struct *work)
+{
+	struct amdgpu_device *adev =
+		container_of(work, struct amdgpu_device, vcn.idle_work.work);
+	unsigned int fences = 0, i;
+
+	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+		fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		struct dpg_pause_state new_state;
+
+		if (fences)
+			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+			new_state.jpeg = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+		adev->vcn.pause_dpg_mode(adev, &new_state);
+	}
+
+	fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
+	fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
+
+	if (fences == 0) {
+		amdgpu_gfx_off_ctrl(adev, true);
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, false);
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+							       AMD_PG_STATE_GATE);
+	} else {
+		schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+	}
+}
+
+void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+	if (set_clocks) {
+		amdgpu_gfx_off_ctrl(adev, false);
+		if (adev->pm.dpm_enabled)
+			amdgpu_dpm_enable_uvd(adev, true);
+		else
+			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+							       AMD_PG_STATE_UNGATE);
+	}
+
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		struct dpg_pause_state new_state;
+		unsigned int fences = 0, i;
+
+		for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+			fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
+
+		if (fences)
+			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+		if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec))
+			new_state.jpeg = VCN_DPG_STATE__PAUSE;
+		else
+			new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
+
+		if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+			new_state.fw_based = VCN_DPG_STATE__PAUSE;
+		else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+			new_state.jpeg = VCN_DPG_STATE__PAUSE;
+
+		adev->vcn.pause_dpg_mode(adev, &new_state);
+	}
+}
+
 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
 	.name = "vcn_v1_0",
 	.early_init = vcn_v1_0_early_init,
@@ -1804,7 +1890,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.insert_start = vcn_v1_0_dec_ring_insert_start,
 	.insert_end = vcn_v1_0_dec_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
+	.begin_use = vcn_v1_0_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
 	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
 	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
@@ -1836,7 +1922,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
 	.insert_nop = amdgpu_ring_insert_nop,
 	.insert_end = vcn_v1_0_enc_ring_insert_end,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
-	.begin_use = amdgpu_vcn_ring_begin_use,
+	.begin_use = vcn_v1_0_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
 	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
 	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
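
The new vcn_v1_0_idle_work_handler() above recovers the amdgpu_device from the embedded delayed-work member with container_of(), then counts outstanding fences to decide whether to gate power or reschedule itself. Below is a minimal, self-contained userspace sketch of that container_of idiom; the fake_work/fake_device types and idle_work_handler() name are hypothetical stand-ins for illustration, not kernel or amdgpu definitions.

/*
 * Sketch of the container_of idiom: recover a pointer to the enclosing
 * struct from a pointer to one of its embedded members. Illustrative
 * types only, not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_work {			/* stands in for struct work_struct */
	int pending;
};

struct fake_device {			/* stands in for struct amdgpu_device */
	const char *name;
	struct fake_work idle_work;	/* embedded member, like vcn.idle_work.work */
};

/* A workqueue-style callback: it only receives the embedded member. */
static void idle_work_handler(struct fake_work *work)
{
	struct fake_device *dev = container_of(work, struct fake_device, idle_work);

	printf("idle work ran for device %s\n", dev->name);
}

int main(void)
{
	struct fake_device dev = { .name = "vcn-demo" };

	/* A real workqueue would invoke the handler; call it directly here. */
	idle_work_handler(&dev.idle_work);
	return 0;
}

The same recovery is what lets the handler in the diff reach adev and call amdgpu_gfx_off_ctrl() or schedule_delayed_work() even though the workqueue only hands it the embedded work pointer; vcn_v1_0_ring_begin_use() mirrors it from the other side by cancelling the pending idle work before ungating.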