author		Thomas Zimmermann <tzimmermann@suse.de>	2020-08-12 19:17:18 +0200
committer	Thomas Zimmermann <tzimmermann@suse.de>	2020-08-12 20:42:08 +0200
commit		534b1f9071d95325044c21d47d9f63a45cdf425e (patch)
tree		5c2aa3cd65bfb6b9e73d9ad22c021d800380f0f6 /drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
parent		82dd18096c718962379e61cd8a7a0dc219db174f (diff)
parent		62975d27d647a40c58d3b96c29b911fc4f33c310 (diff)
Merge drm/drm-next into drm-misc-next
Backmerging drm-next into drm-misc-next for nouveau and panel updates.
Resolves a conflict between ttm and nouveau, where struct ttm_mem_res got
renamed to struct ttm_resource.
Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
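
The conflict itself is a mechanical type rename rather than a behavioural change. As a rough illustration only (nothing below is taken from this merge), a nouveau-side declaration written against the old TTM type name simply gets respelled to the new one when the conflict is resolved:

/*
 * Hypothetical sketch of the rename fix-up. struct ttm_resource is the
 * real post-rename TTM type, but this prototype and its parameter name
 * are illustrative stand-ins, not code from the merge.
 */
struct ttm_resource;	/* opaque here; TTM provides the real definition */

/* Before the backmerge this would have been declared with the old type
 * name; resolving the conflict means updating it to ttm_resource. */
void example_nouveau_move_notify(struct ttm_resource *new_reg);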
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c	153
1 file changed, 76 insertions(+), 77 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1ad79155ed00..927c330fad21 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -38,6 +38,7 @@
 #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h"
 #include "jpeg_v1_0.h"
+#include "vcn_v1_0.h"
 
 #define mmUVD_RBC_XX_IB_REG_CHECK_1_0		0x05ab
 #define mmUVD_RBC_XX_IB_REG_CHECK_1_0_BASE_IDX	1
@@ -360,68 +361,68 @@ static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
 	/* cache window 0: fw */
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
-		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo),
 			0xFFFFFFFF, 0);
-		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 			(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi),
 			0xFFFFFFFF, 0);
-		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
+		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0,
 			0xFFFFFFFF, 0);
 		offset = 0;
 	} else {
-		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
 			lower_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
-		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
 			upper_32_bits(adev->vcn.inst->gpu_addr), 0xFFFFFFFF, 0);
 		offset = size;
-		WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+		WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
 			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0);
 	}
 
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0);
 
 	/* cache window 1: stack */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
 		lower_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
 		upper_32_bits(adev->vcn.inst->gpu_addr + offset), 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0,
 		0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE,
 		0xFFFFFFFF, 0);
 
 	/* cache window 2: context */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
 		lower_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
 		0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
 		upper_32_bits(adev->vcn.inst->gpu_addr + offset + AMDGPU_VCN_STACK_SIZE),
 		0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE,
 		0xFFFFFFFF, 0);
 
 	/* VCN global tiling registers */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG,
 		adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0);
 }
@@ -636,9 +637,9 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
 	reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
 	reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
 	reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
 
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
 
 	/* enable sw clock gating control */
 	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -667,22 +668,21 @@ static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t s
 		UVD_CGC_CTRL__WCB_MODE_MASK |
 		UVD_CGC_CTRL__VCPU_MODE_MASK |
 		UVD_CGC_CTRL__SCPU_MODE_MASK);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel);
 
 	/* turn off clock gating */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel);
 
 	/* turn on SUVD clock gating */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel);
 
 	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel);
 }
 
 static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
-	int ret;
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
@@ -698,7 +698,7 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 			| 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
 
 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF);
 	} else {
 		data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -712,7 +712,7 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 			| 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT
 			| 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT);
 		WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF);
 	}
 
 	/* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */
@@ -728,7 +728,6 @@ static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev)
 static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
 {
 	uint32_t data = 0;
-	int ret;
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
 		/* Before power off, this indicator has to be turned on */
@@ -763,7 +762,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
 			| 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT
 			| 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT);
-		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret);
+		SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF);
 	}
 }
@@ -972,14 +971,14 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
 	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
 	tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK;
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0);
 
 	/* disable interupt */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
 		0, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
 
 	/* initialize VCN memory controller */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
@@ -993,48 +992,48 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 	/* swap (8 in 32) RB and IB */
 	lmi_swap_cntl = 0xa;
 #endif
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0);
 
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_CNTL,
 		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0);
 
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXA0,
 		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0);
 
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUXB0,
 		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
 		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
 		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0);
 
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MPC_SET_MUX,
 		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0);
 
 	vcn_v1_0_mc_resume_dpg_mode(adev);
 
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0);
 
 	/* boot up the VCPU */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0);
 
 	/* enable UMC */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL2,
 		0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, 0xFFFFFFFF, 0);
 
 	/* enable master interrupt */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_MASTINT_EN,
 		UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0);
 
 	vcn_v1_0_clock_gating_dpg_mode(adev, 1);
 
 	/* setup mmUVD_LMI_CTRL */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_LMI_CTRL,
 		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
 		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
 		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
@@ -1046,11 +1045,11 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
 	tmp = adev->gfx.config.gb_addr_config;
 
 	/* setup VCN global tiling registers */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1);
 
 	/* enable System Interrupt for JRBC */
-	WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN,
+	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_SYS_INT_EN,
 		UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1);
 
 	/* force RBC into idle state */
@@ -1112,15 +1111,15 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
  */
 static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
 {
-	int ret_code, tmp;
+	int tmp;
 
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
 
 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
 		UVD_LMI_STATUS__READ_CLEAN_MASK |
 		UVD_LMI_STATUS__WRITE_CLEAN_MASK |
 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
 
 	/* put VCPU into reset */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
@@ -1129,7 +1128,7 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
 
 	tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
 
 	/* disable VCPU clock */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0,
@@ -1153,30 +1152,29 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
 
 static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev)
 {
-	int ret_code = 0;
 	uint32_t tmp;
 
 	/* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
 	/* wait for read ptr to be equal to write ptr */
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);
 
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);
 
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR);
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_JRBC_RB_RPTR, tmp, 0xFFFFFFFF);
 
 	tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
-	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);
 
 	SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
 	/* disable dynamic power gating mode */
 	WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0,
@@ -1219,9 +1217,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 			ret_code = 0;
 
 			if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK))
-				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
 			if (!ret_code) {
 				/* pause DPG non-jpeg */
@@ -1229,7 +1227,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
-					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+					UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
 
 				/* Restore */
 				ring = &adev->vcn.inst->ring_enc[0];
@@ -1251,7 +1249,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			}
 		} else {
 			/* unpause dpg non-jpeg, no need to wait */
@@ -1275,9 +1273,9 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 			ret_code = 0;
 
 			if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK))
-				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
+				ret_code = SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
 			if (!ret_code) {
 				/* Make sure JPRG Snoop is disabled before sending the pause */
@@ -1290,7 +1288,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 				WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE,
 					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK,
-					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code);
+					UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK);
 
 				/* Restore */
 				ring = &adev->jpeg.inst->ring_dec;
@@ -1312,7 +1310,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
 					RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
 				SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
 					UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
-					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+					UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 			}
 		} else {
 			/* unpause dpg jpeg, no need to wait */
@@ -1335,10 +1333,10 @@ static bool vcn_v1_0_is_idle(void *handle)
 static int vcn_v1_0_wait_for_idle(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int ret = 0;
+	int ret;
 
-	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
-		UVD_STATUS__IDLE, ret);
+	ret = SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
+		UVD_STATUS__IDLE);
 
 	return ret;
 }
@@ -1541,7 +1539,7 @@ static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 	/* wait for register write */
-	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
+	data0 = hub->ctx0_ptb_addr_lo32 + vmid * hub->ctx_addr_distance;
 	data1 = lower_32_bits(pd_addr);
 	mask = 0xffffffff;
 	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
@@ -1681,7 +1679,8 @@ static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 	/* wait for reg writes */
-	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
+	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 +
+					vmid * hub->ctx_addr_distance,
 					lower_32_bits(pd_addr), 0xffffffff);
 }
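
A recurring pattern in the hunks above is that SOC15_WAIT_ON_RREG() no longer takes a trailing status out-parameter; where a caller still needs the result, the diff assigns the macro's return value instead (see the vcn_v1_0_wait_for_idle() and vcn_v1_0_pause_dpg_mode() hunks). A minimal sketch of the new calling convention, assuming only what those hunks show:

/* Sketch mirroring the vcn_v1_0_wait_for_idle() hunk above; it assumes
 * only what the diff shows, namely that SOC15_WAIT_ON_RREG() now
 * returns its wait status instead of writing it to an out-parameter. */
static int example_wait_for_idle(struct amdgpu_device *adev)
{
	/* Poll UVD_STATUS until the IDLE bits match, then return the result. */
	return SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE,
				  UVD_STATUS__IDLE);
}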